From cb80f0b680b3d5950a78257c0d080552731bbac3 Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Fri, 26 Apr 2019 20:00:08 -0700
Subject: [PATCH 01/14] Ignore ViM files

Don't accidentally check in ViM swap files, etc.
---
 .gitignore | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.gitignore b/.gitignore
index 97cca67c6..9e0c605fc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,4 +11,8 @@
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
 
+# editor stuff
 .idea
+*.swp
+*.swo
+*~

From f5cb7ceddc3b3c39c9e1ffda6cac169ec80700e5 Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Fri, 26 Apr 2019 20:24:16 -0700
Subject: [PATCH 02/14] Package loading

This introduces utilities for incremental/filtered package loading,
based on go/packages (the new replacement for go/build and friends
when importing). It'll work with modules properly, too.
---
 pkg/loader/doc.go               |  60 ++++++
 pkg/loader/errors.go            |  63 ++++++
 pkg/loader/loader.go            | 349 ++++++++++++++++++++++++++++++++
 pkg/loader/paths.go             |  32 +++
 pkg/loader/refs.go              | 242 ++++++++++++++++++++++
 pkg/loader/testutils/helpers.go |  53 +++++
 pkg/loader/visit.go             |  81 ++++++++
 7 files changed, 880 insertions(+)
 create mode 100644 pkg/loader/doc.go
 create mode 100644 pkg/loader/errors.go
 create mode 100644 pkg/loader/loader.go
 create mode 100644 pkg/loader/paths.go
 create mode 100644 pkg/loader/refs.go
 create mode 100644 pkg/loader/testutils/helpers.go
 create mode 100644 pkg/loader/visit.go

diff --git a/pkg/loader/doc.go b/pkg/loader/doc.go
new file mode 100644
index 000000000..a80065ec7
--- /dev/null
+++ b/pkg/loader/doc.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package loader defines helpers for loading packages from sources. It wraps
+// go/packages, allowing incremental loading of source code and manual control
+// over which packages get type-checked. This allows for faster loading in
+// cases where you don't actually care about certain imports.
+//
+// Because it uses go/packages, it's modules-aware, and works in both modules-
+// and non-modules environments.
+//
+// Loading
+//
+// The main entrypoint for loading is LoadRoots, which traverses the package
+// graph starting at the given patterns (file, package, path, or ...-wildcard,
+// as one might pass to go list). Packages beyond the roots can be accessed
+// via the Imports() method. Packages are initially loaded with export data
+// paths, filenames, and imports.
+//
+// Packages are suitable for comparison, as each unique package only ever has
+// one *Package object returned.
+//
+// Syntax and TypeChecking
+//
+// ASTs and type-checking information can be loaded with NeedSyntax and
+// NeedTypesInfo, respectively. Both are idempotent -- repeated calls will
+// simply re-use the cached contents. Note that NeedTypesInfo will *only* type
+// check the current package -- if you want to type-check imports as well,
+// you'll need to type-check them first.
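+//
+// As a minimal usage sketch (the "./..." pattern is just an illustrative
+// argument), a caller might write:
+//
+//	pkgs, err := LoadRoots("./...")
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, pkg := range pkgs {
+//		pkg.NeedTypesInfo() // also loads syntax, via NeedSyntax
+//	}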
+//
+// Reference Pruning and Recursive Checking
+//
+// In order to type-check using only the packages you care about, you can use a
+// TypeChecker. TypeChecker will visit each top-level type declaration,
+// collect (optionally filtered) references, and type-check referenced
+// packages.
+//
+// Errors
+//
+// Errors can be added to each package. Use ErrFromNode to create an error
+// from an AST node. Errors can then be printed (complete with file and
+// position information) using PrintErrors, optionally filtered by error type.
+// It's generally a good idea to filter out TypeErrors when doing incomplete
+// type-checking with TypeChecker. You can use MaybeErrList to return multiple
+// errors if you need to return an error instead of adding it to a package.
+// AddError will later unroll it into individual errors.
+package loader
diff --git a/pkg/loader/errors.go b/pkg/loader/errors.go
new file mode 100644
index 000000000..aae8ef39f
--- /dev/null
+++ b/pkg/loader/errors.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+)
+
+// PositionedError represents some error with an associated position.
+// The position is tied to some external token.FileSet.
+type PositionedError struct {
+	Pos token.Pos
+	error
+}
+
+// ErrFromNode returns the given error, with additional position information
+// attaching it to the given AST node. It will automatically map
+// over error lists.
+func ErrFromNode(err error, node ast.Node) error {
+	if asList, isList := err.(ErrList); isList {
+		resList := make(ErrList, len(asList))
+		for i, baseErr := range asList {
+			resList[i] = ErrFromNode(baseErr, node)
+		}
+		return resList
+	}
+	return PositionedError{
+		Pos:   node.Pos(),
+		error: err,
+	}
+}
+
+// MaybeErrList constructs an ErrList if the given list of
+// errors has any errors, otherwise returning nil.
+func MaybeErrList(errs []error) error {
+	if len(errs) == 0 {
+		return nil
+	}
+	return ErrList(errs)
+}
+
+// ErrList is a list of errors aggregated together into a single error.
+type ErrList []error
+
+func (l ErrList) Error() string {
+	return fmt.Sprintf("%v", []error(l))
+}
diff --git a/pkg/loader/loader.go b/pkg/loader/loader.go
new file mode 100644
index 000000000..76b867a71
--- /dev/null
+++ b/pkg/loader/loader.go
@@ -0,0 +1,349 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/scanner"
+	"go/token"
+	"go/types"
+	"io/ioutil"
+	"os"
+	"sync"
+
+	"golang.org/x/tools/go/packages"
+)
+
+// Much of this is strongly inspired by the contents of go/packages,
+// except that it allows for lazy loading of syntax and type-checking
+// information to speed up cases where full traversal isn't needed.
+
+// PrintErrors prints errors associated with all packages
+// in the given package graph, starting at the given root
+// packages and traversing through all imports. It will skip
+// any errors of the kinds specified in filterKinds. It will
+// return true if any errors were printed.
+func PrintErrors(pkgs []*Package, filterKinds ...packages.ErrorKind) bool {
+	pkgsRaw := make([]*packages.Package, len(pkgs))
+	for i, pkg := range pkgs {
+		pkgsRaw[i] = pkg.Package
+	}
+	toSkip := make(map[packages.ErrorKind]struct{})
+	for _, errKind := range filterKinds {
+		toSkip[errKind] = struct{}{}
+	}
+	hadErrors := false
+	packages.Visit(pkgsRaw, nil, func(pkgRaw *packages.Package) {
+		for _, err := range pkgRaw.Errors {
+			if _, skip := toSkip[err.Kind]; skip {
+				continue
+			}
+			hadErrors = true
+			fmt.Fprintln(os.Stderr, err)
+		}
+	})
+	return hadErrors
+}
+
+// Package is a single, unique Go package that can be
+// lazily parsed and type-checked. Packages should not
+// be constructed directly -- instead, use LoadRoots.
+// For a given call to LoadRoots, only a single instance
+// of each package exists, and thus they may be used as keys
+// and for comparison.
+type Package struct {
+	*packages.Package
+
+	imports map[string]*Package
+
+	loader *loader
+	sync.Mutex
+}
+
+// Imports returns the imports for the given package, indexed by
+// package path (*not* name in any particular file).
+func (p *Package) Imports() map[string]*Package {
+	if p.imports == nil {
+		p.imports = p.loader.packagesFor(p.Package.Imports)
+	}
+
+	return p.imports
+}
+
+// NeedTypesInfo indicates that type-checking information is needed for this package.
+// Actual type-checking information can be accessed via the Types and TypesInfo fields.
+func (p *Package) NeedTypesInfo() {
+	if p.TypesInfo != nil {
+		return
+	}
+	p.NeedSyntax()
+	p.loader.typeCheck(p)
+}
+
+// NeedSyntax indicates that a parsed AST is needed for this package.
+// Actual ASTs can be accessed via the Syntax field.
+func (p *Package) NeedSyntax() {
+	if p.Syntax != nil {
+		return
+	}
+	out := make([]*ast.File, len(p.CompiledGoFiles))
+	var wg sync.WaitGroup
+	wg.Add(len(p.CompiledGoFiles))
+	for i, filename := range p.CompiledGoFiles {
+		go func(i int, filename string) {
+			defer wg.Done()
+			src, err := ioutil.ReadFile(filename)
+			if err != nil {
+				p.AddError(err)
+				return
+			}
+			out[i], err = p.loader.parseFile(filename, src)
+			if err != nil {
+				p.AddError(err)
+				return
+			}
+		}(i, filename)
+	}
+	wg.Wait()
+	for _, file := range out {
+		if file == nil {
+			return
+		}
+	}
+	p.Syntax = out
+}
+
+// AddError adds an error to the errors associated with the given package.
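+// It recognizes *os.PathError, scanner.ErrorList, types.Error, ErrList, and
+// PositionedError specially, extracting position information where available;
+// any other error is recorded against the package as an UnknownError.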
+func (p *Package) AddError(err error) { + switch typedErr := err.(type) { + case *os.PathError: + // file-reading errors + p.Errors = append(p.Errors, packages.Error{ + Pos: typedErr.Path + ":1", + Msg: typedErr.Err.Error(), + Kind: packages.ParseError, + }) + case scanner.ErrorList: + // parsing/scanning errors + for _, subErr := range typedErr { + p.Errors = append(p.Errors, packages.Error{ + Pos: subErr.Pos.String(), + Msg: subErr.Msg, + Kind: packages.ParseError, + }) + } + case types.Error: + // type-checking errors + p.Errors = append(p.Errors, packages.Error{ + Pos: typedErr.Fset.Position(typedErr.Pos).String(), + Msg: typedErr.Msg, + Kind: packages.TypeError, + }) + case ErrList: + for _, subErr := range typedErr { + p.AddError(subErr) + } + case PositionedError: + p.Errors = append(p.Errors, packages.Error{ + Pos: p.loader.cfg.Fset.Position(typedErr.Pos).String(), + Msg: typedErr.Error(), + Kind: packages.UnknownError, + }) + default: + // should only happen for external errors, like ref checking + p.Errors = append(p.Errors, packages.Error{ + Pos: p.ID + ":-", + Msg: err.Error(), + Kind: packages.UnknownError, + }) + } +} + +// loader loads packages and their imports. Loaded packages will have +// type size, imports, and exports file information populated. Additional +// information, like ASTs and type-checking information, can be accessed +// via methods on individual packages. +type loader struct { + // Roots are the loaded "root" packages in the package graph loaded via + // LoadRoots. + Roots []*Package + + // cfg contains the package loading config (initialized on demand) + cfg *packages.Config + // packages contains the cache of Packages indexed by the underlying + // package.Package, so that we don't ever produce two Packages with + // the same underlying packages.Package. + packages map[*packages.Package]*Package + packagesMu sync.Mutex +} + +// packageFor returns a wrapped Package for the given packages.Package, +// ensuring that there's a one-to-one mapping between the two. +// It's *not* threadsafe -- use packagesFor for that. +func (l *loader) packageFor(pkgRaw *packages.Package) *Package { + if l.packages[pkgRaw] == nil { + l.packages[pkgRaw] = &Package{ + Package: pkgRaw, + loader: l, + } + } + return l.packages[pkgRaw] +} + +// packagesFor returns a map of Package objects for each packages.Package in the input +// map, ensuring that there's a one-to-one mapping between package.Package and Package +// (as per packageFor). +func (l *loader) packagesFor(pkgsRaw map[string]*packages.Package) map[string]*Package { + l.packagesMu.Lock() + defer l.packagesMu.Unlock() + + out := make(map[string]*Package, len(pkgsRaw)) + for name, rawPkg := range pkgsRaw { + out[name] = l.packageFor(rawPkg) + } + return out +} + +// typeCheck type-checks the given package. +func (l *loader) typeCheck(pkg *Package) { + // don't conflict with typeCheckFromExportData + + pkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + + pkg.Fset = l.cfg.Fset + pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. 
+		importedPkg := pkg.Imports()[path]
+		if importedPkg == nil {
+			return nil, fmt.Errorf("no package information for %q", path)
+		}
+
+		if importedPkg.Types != nil && importedPkg.Types.Complete() {
+			return importedPkg.Types, nil
+		}
+
+		// if we haven't already loaded typecheck data, we don't care about this package's types
+		return types.NewPackage(importedPkg.PkgPath, importedPkg.Name), nil
+	})
+
+	var errs []error
+
+	// type-check
+	checkConfig := &types.Config{
+		Importer: importer,
+
+		IgnoreFuncBodies: true, // we only need decl-level info
+
+		Error: func(err error) {
+			errs = append(errs, err)
+		},
+
+		Sizes: pkg.TypesSizes,
+	}
+	if err := types.NewChecker(checkConfig, l.cfg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax); err != nil {
+		errs = append(errs, err)
+	}
+
+	// make sure that if a given sub-import is ill-typed, we mark this package as ill-typed as well.
+	illTyped := len(errs) > 0
+	if !illTyped {
+		for _, importedPkg := range pkg.Imports() {
+			if importedPkg.IllTyped {
+				illTyped = true
+				break
+			}
+		}
+	}
+	pkg.IllTyped = illTyped
+
+	// publish errors to the package error list.
+	for _, err := range errs {
+		pkg.AddError(err)
+	}
+}
+
+// parseFile parses the given file, including comments.
+func (l *loader) parseFile(filename string, src []byte) (*ast.File, error) {
+	// (function bodies are skipped at type-check time via IgnoreFuncBodies, not here)
+	file, err := parser.ParseFile(l.cfg.Fset, filename, src, parser.AllErrors|parser.ParseComments)
+	if err != nil {
+		return nil, err
+	}
+
+	return file, nil
+}
+
+// LoadRoots loads the given "root" packages by path, transitively loading
+// all imports as well.
+//
+// Loaded packages will have type size, imports, and exports file information
+// populated. Additional information, like ASTs and type-checking information,
+// can be accessed via methods on individual packages.
+func LoadRoots(roots ...string) ([]*Package, error) {
+	return LoadRootsWithConfig(&packages.Config{}, roots...)
+}
+
+// LoadRootsWithConfig functions like LoadRoots, except that it allows passing
+// a custom loading config. The config will be modified to suit the needs of
+// the loader.
+//
+// This is generally only useful for use in testing when you need to modify
+// loading settings to load from a fake location.
+func LoadRootsWithConfig(cfg *packages.Config, roots ...string) ([]*Package, error) {
+	l := &loader{
+		cfg:      cfg,
+		packages: make(map[*packages.Package]*Package),
+	}
+	l.cfg.Mode |= packages.LoadImports | packages.NeedTypesSizes
+	if l.cfg.Fset == nil {
+		l.cfg.Fset = token.NewFileSet()
+	}
+	// put our build flags first so that callers can override them
+	l.cfg.BuildFlags = append([]string{"-tags", "ignore_autogenerated"}, l.cfg.BuildFlags...)
+
+	rawPkgs, err := packages.Load(l.cfg, roots...)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, rawPkg := range rawPkgs {
+		l.Roots = append(l.Roots, l.packageFor(rawPkg))
+	}
+
+	return l.Roots, nil
+}
+
+// importerFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
diff --git a/pkg/loader/paths.go b/pkg/loader/paths.go
new file mode 100644
index 000000000..3b783e168
--- /dev/null
+++ b/pkg/loader/paths.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"strings"
+)
+
+// NonVendorPath returns a package path that does not include anything before the
+// last vendor directory. This is useful when using vendor directories, and
+// using go/types.Package.Path(), which returns the full path including vendor.
+//
+// If you're using this, make sure you really need it -- it's better to index by
+// the actual Package object when you can.
+func NonVendorPath(rawPath string) string {
+	parts := strings.Split(rawPath, "/vendor/")
+	return parts[len(parts)-1]
+}
diff --git a/pkg/loader/refs.go b/pkg/loader/refs.go
new file mode 100644
index 000000000..c55240b07
--- /dev/null
+++ b/pkg/loader/refs.go
@@ -0,0 +1,242 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+
+	"go/ast"
+	"strconv"
+	"sync"
+)
+
+// NB(directxman12): most of this is done by the typechecker,
+// but it's a bit slow/heavyweight for what we want -- we want
+// to resolve external imports *only* if we actually need them.
+
+// Basically, what we do is:
+// 1. Map imports to names
+// 2. Find all explicit external references (`name.type`)
+// 3. Find all referenced packages by merging explicit references and dot imports
+// 4. Only type-check those packages
+// 5. Ignore type-checking errors from the missing packages, because we won't ever
+//    touch unloaded types (they're probably used in ignored fields/types, variables, or functions)
+//    (done using PrintErrors with an ignore argument from the caller).
+// 6. Notice any actual type-checking errors via invalid types
+
+// importsMap saves import aliases, mapping them to underlying packages.
+type importsMap struct {
+	// dotImports maps package IDs to packages for any packages that have
+	// been imported as `.`
+	dotImports map[string]*Package
+	// byName maps package aliases or names to the underlying package.
+	byName map[string]*Package
+}
+
+// mapImports maps imports from the names they use in the given file to the underlying package,
+// using a map of package import paths to packages (generally from Package.Imports()).
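+//
+// As an illustration, given imports like
+//
+//	import (
+//		foo "fmt"
+//		. "strings"
+//	)
+//
+// byName would map "foo" to the fmt package, while the strings package would
+// be recorded in dotImports under its package ID.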
+func mapImports(file *ast.File, importedPkgs map[string]*Package) (*importsMap, error) { + m := &importsMap{ + dotImports: make(map[string]*Package), + byName: make(map[string]*Package), + } + for _, importSpec := range file.Imports { + path, err := strconv.Unquote(importSpec.Path.Value) + if err != nil { + return nil, ErrFromNode(err, importSpec.Path) + } + importedPkg := importedPkgs[path] + if importedPkg == nil { + return nil, ErrFromNode(fmt.Errorf("no such package located"), importSpec.Path) + } + if importSpec.Name == nil { + m.byName[importedPkg.Name] = importedPkg + continue + } + if importSpec.Name.Name == "." { + m.dotImports[importedPkg.ID] = importedPkg + continue + } + m.byName[importSpec.Name.Name] = importedPkg + } + + return m, nil +} + +// referenceSet finds references to external packages' types in the given file, +// without otherwise calling into the type-checker. When checking structs, +// it only checks fields with JSON tags. +type referenceSet struct { + file *ast.File + imports *importsMap + + externalRefs map[*Package]struct{} +} + +func (r *referenceSet) init() { + if r.externalRefs == nil { + r.externalRefs = make(map[*Package]struct{}) + } +} + +// NodeFilter filters nodes, accepting them for reference collection +// when true is returned and rejecting them when false is returned. +type NodeFilter func(ast.Node) bool + +// collectReferences saves all references to external types in the given info. +func (r *referenceSet) collectReferences(rawType ast.Expr, filterNode NodeFilter) { + r.init() + col := &referenceCollector{ + refs: r, + filterNode: filterNode, + } + ast.Walk(col, rawType) +} + +// external saves an external reference to the given named package. +func (r *referenceSet) external(pkgName string) { + pkg := r.imports.byName[pkgName] + r.externalRefs[pkg] = struct{}{} +} + +// referenceCollector visits nodes in an AST, adding external references to a +// referenceSet. +type referenceCollector struct { + refs *referenceSet + filterNode NodeFilter +} + +func (c *referenceCollector) Visit(node ast.Node) ast.Visitor { + if !c.filterNode(node) { + return nil + } + switch typedNode := node.(type) { + case *ast.Ident: + // local reference or dot-import, ignore + return nil + case *ast.SelectorExpr: + pkgName := typedNode.X.(*ast.Ident).Name + c.refs.external(pkgName) + return nil + default: + return c + } +} + +// allReferencedPackages finds all directly referenced packages in the given package. 
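+// If mapping a file's imports fails, the error is recorded on the package
+// via AddError and a nil slice is returned.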
+func allReferencedPackages(pkg *Package, filterNodes NodeFilter) []*Package {
+	pkg.NeedSyntax()
+	refsByFile := make(map[*ast.File]*referenceSet)
+	for _, file := range pkg.Syntax {
+		imports, err := mapImports(file, pkg.Imports())
+		if err != nil {
+			pkg.AddError(err)
+			return nil
+		}
+		refs := &referenceSet{
+			file:    file,
+			imports: imports,
+		}
+		refsByFile[file] = refs
+	}
+
+	EachType(pkg, func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec) {
+		refs := refsByFile[file]
+		refs.collectReferences(spec.Type, filterNodes)
+	})
+
+	allPackages := make(map[*Package]struct{})
+	for _, refs := range refsByFile {
+		for _, pkg := range refs.imports.dotImports {
+			allPackages[pkg] = struct{}{}
+		}
+		for ref := range refs.externalRefs {
+			allPackages[ref] = struct{}{}
+		}
+	}
+
+	res := make([]*Package, 0, len(allPackages))
+	for pkg := range allPackages {
+		res = append(res, pkg)
+	}
+	return res
+}
+
+// TypeChecker performs type-checking on a limited subset of packages by
+// checking each package's types' externally-referenced types, and only
+// type-checking those packages.
+type TypeChecker struct {
+	checkedPackages map[*Package]struct{}
+	filterNodes     NodeFilter
+}
+
+// Check type-checks the given package and all packages referenced
+// by types that pass through (have true returned by) filterNodes.
+func (c *TypeChecker) Check(root *Package, filterNodes NodeFilter) {
+	c.init()
+
+	if filterNodes == nil {
+		filterNodes = c.filterNodes
+	}
+
+	// use a sub-checker with the appropriate settings
+	(&TypeChecker{
+		filterNodes:     filterNodes,
+		checkedPackages: c.checkedPackages,
+	}).check(root)
+}
+
+func (c *TypeChecker) init() {
+	if c.checkedPackages == nil {
+		c.checkedPackages = make(map[*Package]struct{})
+	}
+	if c.filterNodes == nil {
+		// check every type by default
+		c.filterNodes = func(_ ast.Node) bool {
+			return true
+		}
+	}
+}
+
+// check recursively type-checks the given package, only loading packages that
+// are actually referenced by our types (it's the actual implementation of Check,
+// without initialization).
+func (c *TypeChecker) check(root *Package) {
+	root.Lock()
+	defer root.Unlock()
+
+	if _, ok := c.checkedPackages[root]; ok {
+		return
+	}
+
+	refedPackages := allReferencedPackages(root, c.filterNodes)
+
+	// first, recursively check all referenced packages...
+	var wg sync.WaitGroup
+	for _, pkg := range refedPackages {
+		wg.Add(1)
+		go func(pkg *Package) {
+			defer wg.Done()
+			c.check(pkg)
+		}(pkg)
+	}
+	wg.Wait()
+
+	// ...then, we can safely type-check ourself
+	root.NeedTypesInfo()
+
+	c.checkedPackages[root] = struct{}{}
+}
diff --git a/pkg/loader/testutils/helpers.go b/pkg/loader/testutils/helpers.go
new file mode 100644
index 000000000..c95457ce8
--- /dev/null
+++ b/pkg/loader/testutils/helpers.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package testutils defines utilities for using loader.Packages
+// in tests.
+//
+// The main utility is LoadFakeRoots, which allows writing of fake
+// modules to the local filesystem, then loading those from within
+// a Ginkgo test. If not using Ginkgo, you can reproduce similar
+// logic with loader.LoadRootsWithConfig and go/packages/packagestest.
+package testutils
+
+import (
+	"testing"
+
+	"github.com/onsi/ginkgo"
+	pkgstest "golang.org/x/tools/go/packages/packagestest"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// fakeT wraps Ginkgo's fake testing.T with a real testing.T
+// to get the few missing methods.
+type fakeT struct {
+	ginkgo.GinkgoTInterface
+	*testing.T
+}
+
+// LoadFakeRoots loads the given "root" packages by fake module,
+// transitively loading all imports as well.
+//
+// Loaded packages will have type size, imports, and exports file information
+// populated. Additional information, like ASTs and type-checking information,
+// can be accessed via methods on individual packages.
+func LoadFakeRoots(exporter pkgstest.Exporter, modules []pkgstest.Module, roots ...string) ([]*loader.Package, *pkgstest.Exported, error) {
+	exported := pkgstest.Export(fakeT{ginkgo.GinkgoT(), &testing.T{}}, exporter, modules)
+
+	pkgs, err := loader.LoadRootsWithConfig(exported.Config, roots...)
+	return pkgs, exported, err
+}
diff --git a/pkg/loader/visit.go b/pkg/loader/visit.go
new file mode 100644
index 000000000..b5646fde1
--- /dev/null
+++ b/pkg/loader/visit.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"go/ast"
+	"reflect"
+	"strconv"
+)
+
+// TypeCallback is a callback called for each raw AST (gendecl, typespec) combo.
+type TypeCallback func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec)
+
+// EachType calls the given callback for each (gendecl, typespec) combo in the
+// given package. Generally, using markers.EachType is better when working
+// with marker data, and has a more convenient representation.
+func EachType(pkg *Package, cb TypeCallback) {
+	visitor := &typeVisitor{
+		callback: cb,
+	}
+	pkg.NeedSyntax()
+	for _, file := range pkg.Syntax {
+		visitor.file = file
+		ast.Walk(visitor, file)
+	}
+}
+
+// typeVisitor visits all TypeSpecs, calling the given callback for each.
+type typeVisitor struct {
+	callback TypeCallback
+	decl     *ast.GenDecl
+	file     *ast.File
+}
+
+// Visit visits all TypeSpecs.
+func (v *typeVisitor) Visit(node ast.Node) ast.Visitor {
+	if node == nil {
+		v.decl = nil
+		return v
+	}
+
+	switch typedNode := node.(type) {
+	case *ast.File:
+		v.file = typedNode
+		return v
+	case *ast.GenDecl:
+		v.decl = typedNode
+		return v
+	case *ast.TypeSpec:
+		v.callback(v.file, v.decl, typedNode)
+		return nil // don't recurse
+	default:
+		return nil
+	}
+}
+
+// ParseAstTag parses the given raw tag literal into a reflect.StructTag.
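+// For example, a tag literal written as
+//
+//	`json:"name,omitempty"`
+//
+// yields a StructTag whose Get("json") returns "name,omitempty"; a nil or
+// unparseable literal yields the zero StructTag.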
+func ParseAstTag(tag *ast.BasicLit) reflect.StructTag {
+	if tag == nil {
+		return reflect.StructTag("")
+	}
+	tagStr, err := strconv.Unquote(tag.Value)
+	if err != nil {
+		return reflect.StructTag("")
+	}
+	return reflect.StructTag(tagStr)
+}

From f2ed5cb81e6f668a859c996201f7d7b56d0b9eb0 Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Fri, 26 Apr 2019 21:01:25 -0700
Subject: [PATCH 03/14] Marker parsing

This introduces a framework for collecting and parsing "marker"
comments into structured Go data, much like the "json" package.
---
 pkg/markers/collect.go            | 420 +++++++++++++++
 pkg/markers/collect_test.go       | 194 +++++++
 pkg/markers/doc.go                |  95 ++++
 pkg/markers/markers_suite_test.go | 126 +++++
 pkg/markers/parse.go              | 819 ++++++++++++++++++++++++++++++
 pkg/markers/parse_test.go         | 244 +++++++++
 pkg/markers/zip.go                | 191 +++++++
 7 files changed, 2089 insertions(+)
 create mode 100644 pkg/markers/collect.go
 create mode 100644 pkg/markers/collect_test.go
 create mode 100644 pkg/markers/doc.go
 create mode 100644 pkg/markers/markers_suite_test.go
 create mode 100644 pkg/markers/parse.go
 create mode 100644 pkg/markers/parse_test.go
 create mode 100644 pkg/markers/zip.go

diff --git a/pkg/markers/collect.go b/pkg/markers/collect.go
new file mode 100644
index 000000000..a97228e07
--- /dev/null
+++ b/pkg/markers/collect.go
@@ -0,0 +1,420 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"go/ast"
+	"go/token"
+	"strings"
+	"sync"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// Collector collects and parses marker comments defined in the registry
+// from package source code. If no registry is provided, an empty one will
+// be initialized on the first call to MarkersInPackage.
+type Collector struct {
+	*Registry
+
+	byPackage map[string]map[ast.Node]MarkerValues
+	mu        sync.Mutex
+}
+
+// MarkerValues are all the values for some set of markers.
+type MarkerValues map[string][]interface{}
+
+// Get fetches the first value for the given marker, returning
+// nil if no values are available.
+func (v MarkerValues) Get(name string) interface{} {
+	vals := v[name]
+	if len(vals) == 0 {
+		return nil
+	}
+	return vals[0]
+}
+
+func (c *Collector) init() {
+	if c.Registry == nil {
+		c.Registry = &Registry{}
+	}
+	if c.byPackage == nil {
+		c.byPackage = make(map[string]map[ast.Node]MarkerValues)
+	}
+}
+
+// MarkersInPackage computes the marker values by node for the given package. Results
+// are cached by package ID, so this is safe to call repeatedly from different functions.
+// Each file in the package is treated as a distinct node.
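+//
+// (As an illustration, a registered type-level marker written as
+// "+testing:typelvl=val" in a type's godoc would be parsed against its
+// definition and surfaced under that type's *ast.TypeSpec node.)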
+//
+// We consider a marker to be associated with a given AST node if any of the
+// following is true:
+//
+// - it's in the Godoc for that AST node
+//
+// - it's in the closest non-godoc comment group above that node,
+// *and* that node is a type or field node, *and* [it's either
+// registered as type-level *or* it's not registered as being
+// package-level]
+//
+// - it's not in the Godoc of a node, doesn't meet the above criteria, and
+// isn't in a struct definition (in which case it's package-level)
+func (c *Collector) MarkersInPackage(pkg *loader.Package) (map[ast.Node]MarkerValues, error) {
+	c.mu.Lock()
+	c.init()
+	if markers, exist := c.byPackage[pkg.ID]; exist {
+		c.mu.Unlock()
+		return markers, nil
+	}
+	// unlock early; it's ok if we do a bit of extra work rather than locking while we're working
+	c.mu.Unlock()
+
+	pkg.NeedSyntax()
+	nodeMarkersRaw := c.associatePkgMarkers(pkg)
+	markers, err := c.parseMarkersInPackage(nodeMarkersRaw)
+	if err != nil {
+		return nil, err
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.byPackage[pkg.ID] = markers
+
+	return markers, nil
+}
+
+// parseMarkersInPackage parses the given raw marker comments into output values using the registry.
+func (c *Collector) parseMarkersInPackage(nodeMarkersRaw map[ast.Node][]markerComment) (map[ast.Node]MarkerValues, error) {
+	var errors []error
+	nodeMarkerValues := make(map[ast.Node]MarkerValues)
+	for node, markersRaw := range nodeMarkersRaw {
+		var target TargetType
+		switch node.(type) {
+		case *ast.File:
+			target = DescribesPackage
+		case *ast.Field:
+			target = DescribesField
+		default:
+			target = DescribesType
+		}
+		markerVals := make(map[string][]interface{})
+		for _, markerRaw := range markersRaw {
+			markerText := markerRaw.Text()
+			def := c.Registry.Lookup(markerText, target)
+			if def == nil {
+				continue
+			}
+			val, err := def.Parse(markerText)
+			if err != nil {
+				errors = append(errors, loader.ErrFromNode(err, markerRaw))
+				continue
+			}
+			markerVals[def.Name] = append(markerVals[def.Name], val)
+		}
+		nodeMarkerValues[node] = markerVals
+	}
+
+	return nodeMarkerValues, loader.MaybeErrList(errors)
+}
+
+// associatePkgMarkers associates markers with AST nodes in the given package.
+func (c *Collector) associatePkgMarkers(pkg *loader.Package) map[ast.Node][]markerComment {
+	nodeMarkers := make(map[ast.Node][]markerComment)
+	for _, file := range pkg.Syntax {
+		fileNodeMarkers := c.associateFileMarkers(file)
+		for node, markers := range fileNodeMarkers {
+			nodeMarkers[node] = append(nodeMarkers[node], markers...)
+		}
+	}
+
+	return nodeMarkers
+}
+
+// associateFileMarkers associates markers with AST nodes in the given file.
+func (c *Collector) associateFileMarkers(file *ast.File) map[ast.Node][]markerComment {
+	// grab all the raw marker comments by node
+	visitor := markerSubVisitor{
+		collectPackageLevel: true,
+		markerVisitor: &markerVisitor{
+			nodeMarkers: make(map[ast.Node][]markerComment),
+			allComments: file.Comments,
+		},
+	}
+	ast.Walk(visitor, file)
+
+	// grab the last package-level comments at the end of the file (if any)
+	lastFileMarkers := visitor.markersBetween(false, visitor.commentInd, len(visitor.allComments))
+	visitor.pkgMarkers = append(visitor.pkgMarkers, lastFileMarkers...)
+
+	// figure out if any type-level markers are actually package-level markers
+	for node, markers := range visitor.nodeMarkers {
+		_, isType := node.(*ast.TypeSpec)
+		if !isType {
+			continue
+		}
+		endOfMarkers := 0
+		for _, marker := range markers {
+			if marker.fromGodoc {
+				// markers from godoc are never package level
+				markers[endOfMarkers] = marker
+				endOfMarkers++
+				continue
+			}
+			markerText := marker.Text()
+			typeDef := c.Registry.Lookup(markerText, DescribesType)
+			if typeDef != nil {
+				// prefer assuming type-level markers
+				markers[endOfMarkers] = marker
+				endOfMarkers++
+				continue
+			}
+			def := c.Registry.Lookup(markerText, DescribesPackage)
+			if def == nil {
+				// assume type-level unless proven otherwise
+				markers[endOfMarkers] = marker
+				endOfMarkers++
+				continue
+			}
+			// it's package-level, since a package-level definition exists
+			visitor.pkgMarkers = append(visitor.pkgMarkers, marker)
+		}
+		visitor.nodeMarkers[node] = markers[:endOfMarkers] // re-set after trimming the package markers
+	}
+	visitor.nodeMarkers[file] = visitor.pkgMarkers
+
+	return visitor.nodeMarkers
+}
+
+// markerComment is an AST comment that contains a marker.
+// It may or may not be from a Godoc comment, which affects
+// marker re-association (from type-level to package-level).
+type markerComment struct {
+	*ast.Comment
+	fromGodoc bool
+}
+
+// Text returns the text of the marker, stripped of the comment
+// marker and leading spaces, as should be passed to Registry.Lookup
+// and Registry.Parse.
+func (c markerComment) Text() string {
+	return strings.TrimSpace(c.Comment.Text[2:])
+}
+
+// markerVisitor visits AST nodes, recording markers associated with each node.
+type markerVisitor struct {
+	allComments []*ast.CommentGroup
+	commentInd  int
+
+	declComments         []markerComment
+	lastLineCommentGroup *ast.CommentGroup
+
+	pkgMarkers  []markerComment
+	nodeMarkers map[ast.Node][]markerComment
+}
+
+// isMarkerComment checks that the given comment is a single-line (`//`)
+// comment and that its first non-space content is `+`.
+func isMarkerComment(comment string) bool {
+	if comment[0:2] != "//" {
+		return false
+	}
+	stripped := strings.TrimSpace(comment[2:])
+	if len(stripped) < 1 || stripped[0] != '+' {
+		return false
+	}
+	return true
+}
+
+// markersBetween grabs the markers between the given indices in the list of all comments.
+func (v *markerVisitor) markersBetween(fromGodoc bool, start, end int) []markerComment {
+	if start < 0 || end < 0 {
+		return nil
+	}
+	var res []markerComment
+	for i := start; i < end; i++ {
+		commentGroup := v.allComments[i]
+		for _, comment := range commentGroup.List {
+			if !isMarkerComment(comment.Text) {
+				continue
+			}
+			res = append(res, markerComment{Comment: comment, fromGodoc: fromGodoc})
+		}
+	}
+	return res
+}
+
+type markerSubVisitor struct {
+	*markerVisitor
+	node                ast.Node
+	collectPackageLevel bool
+}
+
+// Visit collects markers for each node in the AST, optionally
+// collecting unassociated markers as package-level.
+func (v markerSubVisitor) Visit(node ast.Node) ast.Visitor {
+	if node == nil {
+		// end of the node, so we might need to advance comments beyond the end
+		// of the block if we don't want to collect package-level markers in
+		// this block.
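+		// (i.e., fast-forward commentInd past every comment group that
+		// starts before this node ends, so a later sibling node doesn't
+		// re-inspect them)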
+ + if !v.collectPackageLevel { + if v.commentInd < len(v.allComments) { + lastCommentInd := v.commentInd + nextGroup := v.allComments[lastCommentInd] + for nextGroup.Pos() < v.node.End() { + lastCommentInd++ + if lastCommentInd >= len(v.allComments) { + // after the increment so our decrement below still makes sense + break + } + nextGroup = v.allComments[lastCommentInd] + } + v.commentInd = lastCommentInd + } + } + + return nil + } + + // skip comments on the same line as the previous node + if v.lastLineCommentGroup != nil && v.lastLineCommentGroup.Pos() == v.allComments[v.commentInd].Pos() { + v.commentInd++ + } + + // stop visiting if there are no more comments in the file + // NB(directxman12): we can't just stop immediately, because we + // still need to check if there are typespecs associated with gendecls. + var markerCommentBlock []markerComment + var docCommentBlock []markerComment + lastCommentInd := v.commentInd + if v.commentInd < len(v.allComments) { + // figure out the first comment after the node in question... + nextGroup := v.allComments[lastCommentInd] + for nextGroup.Pos() < node.Pos() { + lastCommentInd++ + if lastCommentInd >= len(v.allComments) { + // after the increment so our decrement below still makes sense + break + } + nextGroup = v.allComments[lastCommentInd] + } + lastCommentInd-- // ...then decrement to get the last comment before the node in question + + // figure out the godoc comment so we can deal with it separately + var docGroup *ast.CommentGroup + docGroup, v.lastLineCommentGroup = associatedCommentsFor(node) + + // find the last comment group that's not godoc + markerCommentInd := lastCommentInd + if docGroup != nil && v.allComments[markerCommentInd].Pos() == docGroup.Pos() { + markerCommentInd-- + } + + // check if we have freestanding package markers, + // and find the markers in our "closest non-godoc" comment block, + // plus our godoc comment block + if markerCommentInd >= v.commentInd { + if v.collectPackageLevel { + // assume anything between the comment ind and the marker ind (not including it) + // are package-level + v.pkgMarkers = append(v.pkgMarkers, v.markersBetween(false, v.commentInd, markerCommentInd)...) + } + markerCommentBlock = v.markersBetween(false, markerCommentInd, markerCommentInd+1) + docCommentBlock = v.markersBetween(true, markerCommentInd+1, lastCommentInd+1) + } else { + docCommentBlock = v.markersBetween(true, markerCommentInd+1, lastCommentInd+1) + } + } + + resVisitor := markerSubVisitor{ + collectPackageLevel: false, // don't collect package level by default + markerVisitor: v.markerVisitor, + node: node, + } + + // associate those markers with a node + switch typedNode := node.(type) { + case *ast.GenDecl: + // save the comments associated with the gen-decl if it's a single-line type decl + if typedNode.Lparen != token.NoPos || typedNode.Tok != token.TYPE { + // not a single-line type spec, treat them as free comments + v.pkgMarkers = append(v.pkgMarkers, markerCommentBlock...) + break + } + // save these, we'll need them when we encounter the actual type spec + v.declComments = append(v.declComments, markerCommentBlock...) + v.declComments = append(v.declComments, docCommentBlock...) + case *ast.TypeSpec: + // add in comments attributed to the gen-decl, if any, + // as well as comments associated with the actual type + v.nodeMarkers[node] = append(v.nodeMarkers[node], v.declComments...) + v.nodeMarkers[node] = append(v.nodeMarkers[node], markerCommentBlock...) 
+ v.nodeMarkers[node] = append(v.nodeMarkers[node], docCommentBlock...) + + v.declComments = nil + v.collectPackageLevel = false // don't collect package-level inside type structs + case *ast.Field: + v.nodeMarkers[node] = append(v.nodeMarkers[node], markerCommentBlock...) + v.nodeMarkers[node] = append(v.nodeMarkers[node], docCommentBlock...) + case *ast.File: + v.pkgMarkers = append(v.pkgMarkers, markerCommentBlock...) + v.pkgMarkers = append(v.pkgMarkers, docCommentBlock...) + + // collect markers in root file scope + resVisitor.collectPackageLevel = true + default: + // assume markers before anything else are package-level markers, + // *but* don't include any markers in godoc + if v.collectPackageLevel { + v.pkgMarkers = append(v.pkgMarkers, markerCommentBlock...) + } + } + + // increment the comment ind so that we start at the right place for the next node + v.commentInd = lastCommentInd + 1 + + return resVisitor + +} + +// associatedCommentsFor returns the doc comment group (if relevant and present) and end-of-line comment +// (again if relevant and present) for the given AST node. +func associatedCommentsFor(node ast.Node) (docGroup *ast.CommentGroup, lastLineCommentGroup *ast.CommentGroup) { + switch typedNode := node.(type) { + case *ast.Field: + docGroup = typedNode.Doc + lastLineCommentGroup = typedNode.Comment + case *ast.File: + docGroup = typedNode.Doc + case *ast.FuncDecl: + docGroup = typedNode.Doc + case *ast.GenDecl: + docGroup = typedNode.Doc + case *ast.ImportSpec: + docGroup = typedNode.Doc + lastLineCommentGroup = typedNode.Comment + case *ast.TypeSpec: + docGroup = typedNode.Doc + lastLineCommentGroup = typedNode.Comment + case *ast.ValueSpec: + docGroup = typedNode.Doc + lastLineCommentGroup = typedNode.Comment + default: + lastLineCommentGroup = nil + } + + return docGroup, lastLineCommentGroup +} diff --git a/pkg/markers/collect_test.go b/pkg/markers/collect_test.go new file mode 100644 index 000000000..3f24023e6 --- /dev/null +++ b/pkg/markers/collect_test.go @@ -0,0 +1,194 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-tools/pkg/markers" +) + +type fieldPath struct { + typ string + field string +} + +var _ = Describe("Collecting", func() { + var col *Collector + var markersByType map[string]MarkerValues + var markersByField map[fieldPath]MarkerValues + + JustBeforeEach(func() { + By("setting up the registry and collector") + reg := &Registry{} + + mustDefine(reg, "testing:pkglvl", DescribesPackage, "") + mustDefine(reg, "testing:typelvl", DescribesType, "") + mustDefine(reg, "testing:fieldlvl", DescribesField, "") + mustDefine(reg, "testing:eitherlvl", DescribesType, "") + mustDefine(reg, "testing:eitherlvl", DescribesPackage, "") + + col = &Collector{Registry: reg} + + By("gathering markers by type name") + markersByType = make(map[string]MarkerValues) + markersByField = make(map[fieldPath]MarkerValues) + err := EachType(col, fakePkg, func(info *TypeInfo) { + markersByType[info.Name] = info.Markers + for _, field := range info.Fields { + markersByField[fieldPath{typ: info.Name, field: field.Name}] = field.Markers + } + }) + Expect(err).NotTo(HaveOccurred()) + Expect(fakePkg.Errors).To(HaveLen(0)) + }) + + Context("of package-level markers", func() { + + It("should consider markers anywhere not obviously type- or field-level as package-level", func() { + By("grabbing all package-level markers") + pkgMarkers, err := PackageMarkers(col, fakePkg) + Expect(err).NotTo(HaveOccurred()) + Expect(fakePkg.Errors).To(HaveLen(0)) + + By("checking that it only contains package-level markers") + Expect(pkgMarkers).NotTo(HaveKey("testing:typelvl")) + + By("checking that it contains the right package-level markers") + Expect(pkgMarkers).To(HaveKeyWithValue("testing:pkglvl", ContainElement("here unattached"))) + Expect(pkgMarkers).To(HaveKeyWithValue("testing:pkglvl", ContainElement("here at end after last node"))) + + By("checking that it doesn't contain any markers it's not supposed to") + Expect(pkgMarkers).To(HaveKeyWithValue("testing:pkglvl", Not(ContainElement(ContainSubstring("not here"))))) + }) + }) + + Context("of type-level markers", func() { + It("should not have package-level markers associated", func() { + Expect(markersByType).To(HaveKeyWithValue("Foo", Not(HaveKey("testing:pkglvl")))) + }) + + It("should not contain markers it's not supposed to", func() { + Expect(markersByType).NotTo(ContainElement(HaveKeyWithValue("testing:typelvl", ContainElement(ContainSubstring("not here"))))) + Expect(markersByType).NotTo(ContainElement(HaveKeyWithValue("testing:eitherlvl", ContainElement(ContainSubstring("not here"))))) + }) + + Context("with godoc", func() { + It("should associate markers in godoc", func() { + Expect(markersByType).To(HaveKeyWithValue("Foo", + HaveKeyWithValue("testing:typelvl", ContainElement("here on type")))) + }) + + It("should associate markers in the closest non-godoc block", func() { + Expect(markersByType).To(HaveKeyWithValue("Foo", + HaveKeyWithValue("testing:typelvl", ContainElement("here before type")))) + }) + + It("should re-associate package-level markers from the closest non-godoc", func() { + By("grabbing package markers") + pkgMarkers, err := PackageMarkers(col, fakePkg) + Expect(err).NotTo(HaveOccurred()) + Expect(fakePkg.Errors).To(HaveLen(0)) + + By("checking that the marker got reassociated") + Expect(pkgMarkers).To(HaveKeyWithValue("testing:pkglvl", ContainElement("here reassociated"))) + }) + }) + Context("without godoc", func() { + It("should associate markers in the closest block", func() { + Expect(markersByType).To(HaveKeyWithValue("Quux", + 
HaveKeyWithValue("testing:typelvl", ContainElement("here without godoc")))) + }) + + It("should re-associate package-level markers from the closest block", func() { + By("grabbing package markers") + pkgMarkers, err := PackageMarkers(col, fakePkg) + Expect(err).NotTo(HaveOccurred()) + Expect(fakePkg.Errors).To(HaveLen(0)) + + By("checking that the marker got reassociated") + Expect(pkgMarkers).To(HaveKeyWithValue("testing:pkglvl", ContainElement("here reassociated no godoc"))) + + }) + }) + + It("should not re-associate package-level markers in godoc", func() { + By("grabbing package markers") + pkgMarkers, err := PackageMarkers(col, fakePkg) + Expect(err).NotTo(HaveOccurred()) + Expect(fakePkg.Errors).To(HaveLen(0)) + + By("checking that the marker didn't get reassociated") + Expect(pkgMarkers).To(HaveKeyWithValue("testing:pkglvl", Not(ContainElement("not here godoc")))) + }) + + It("should not re-associate package-level markers if there's also a type-level marker", func() { + By("checking that the type has the marker") + Expect(markersByType).To(HaveKeyWithValue("Foo", + HaveKeyWithValue("testing:eitherlvl", ContainElement("here not reassociated")))) + + By("grabbing package markers") + pkgMarkers, err := PackageMarkers(col, fakePkg) + Expect(err).NotTo(HaveOccurred()) + Expect(fakePkg.Errors).To(HaveLen(0)) + + By("checking that the marker didn't get reassociated to the package") + Expect(pkgMarkers).To(SatisfyAny( + Not(HaveKey("testing:eitherlvl")), + HaveKeyWithValue("testing:eitherlvl", Not(ContainElement("here not reassociated"))))) + + }) + + It("should consider markers on the gendecl even if there are no more markers in the file", func() { + Expect(markersByType).To(HaveKeyWithValue("Cheese", + HaveKeyWithValue("testing:typelvl", ContainElement("here on typedecl with no more")))) + + }) + }) + + Context("of field-level markers", func() { + It("should not contain markers it's not supposed to", func() { + Expect(markersByField).NotTo(ContainElement(HaveKeyWithValue("testing:fieldlvl", ContainElement(ContainSubstring("not here"))))) + }) + Context("with godoc", func() { + It("should associate markers in godoc", func() { + Expect(markersByField).To(HaveKeyWithValue(fieldPath{typ: "Foo", field: "WithGodoc"}, + HaveKeyWithValue("testing:fieldlvl", ContainElement("here in godoc")))) + }) + + It("should associate markers in the closest non-godoc block", func() { + Expect(markersByField).To(HaveKeyWithValue(fieldPath{typ: "Foo", field: "WithGodoc"}, + HaveKeyWithValue("testing:fieldlvl", ContainElement("here before godoc")))) + }) + }) + Context("without godoc", func() { + It("should associate markers in the closest block", func() { + Expect(markersByField).To(HaveKeyWithValue(fieldPath{typ: "Foo", field: "WithoutGodoc"}, + HaveKeyWithValue("testing:fieldlvl", ContainElement("here without godoc")))) + }) + }) + + It("should not consider markers at the end of a line", func() { + Expect(markersByField).To(HaveKeyWithValue(fieldPath{typ: "Foo", field: "WithGodoc"}, + HaveKeyWithValue("testing:fieldlvl", Not(ContainElement("not here after field"))))) + + Expect(markersByField).To(HaveKeyWithValue(fieldPath{typ: "Foo", field: "WithoutGodoc"}, + HaveKeyWithValue("testing:fieldlvl", Not(ContainElement("not here after field"))))) + }) + }) +}) diff --git a/pkg/markers/doc.go b/pkg/markers/doc.go new file mode 100644 index 000000000..1000fc324 --- /dev/null +++ b/pkg/markers/doc.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package markers contains utilities for defining and parsing "marker
+// comments", also occasionally called tag comments (we use the term marker to
+// avoid confusion with struct tags). Parsed result (output) values take the
+// form of Go values, much like the "encoding/json" package.
+//
+// Definitions and Parsing
+//
+// Markers are defined as structured Definitions which can be used to
+// consistently parse marker comments. A Definition contains a concrete
+// output type for the marker, which can be a simple type (like string), a
+// struct, or a wrapper type (useful for defining additional methods on marker
+// types).
+//
+// Markers take the general form
+//
+// +path:to:marker=val
+//
+// +path:to:marker:arg1=val,arg2=val2
+//
+// +path:to:marker
+//
+// Arguments may be ints, bools, strings, and slices. Ints and bools take
+// their standard form from Go. Strings may take any of their standard forms,
+// or any sequence of unquoted characters up until a `,` or `;` is encountered.
+// Lists take either of the following forms:
+//
+// val;val;val
+//
+// {val, val, val}
+//
+// Note that the first form will not properly parse nested slices, but is
+// generally convenient and is the form used in many existing markers.
+//
+// Each of those argument types maps to the corresponding Go type. Pointers
+// mark optional fields (a struct tag, below, may also be used). The empty
+// interface will match any type.
+//
+// Struct fields may optionally be annotated with the `marker` struct tag. The
+// first argument is a name override. If it's left blank (or the tag isn't
+// present), the camelCase version of the name will be used. The only
+// additional argument defined is `optional`, which marks a field as optional
+// without using a pointer.
+//
+// All parsed values are unmarshalled into the output type. If any
+// non-optional fields aren't mentioned, an error will be raised unless
+// `Strict` is set to false.
+//
+// Registries and Lookup
+//
+// Definitions can be added to registries to facilitate lookups. Each
+// definition is marked as either describing a type, struct field, or package
+// (unassociated). The same marker name may be registered multiple times, as
+// long as each describes a different construct (type, field, or package).
+// Definitions can then be looked up by passing unparsed markers.
+//
+// Collection and Extraction
+//
+// Markers can be collected from a loader.Package using a Collector. The
+// Collector will read from a given Registry, collecting comments that look
+// like markers and parsing them if they match some definition on the registry.
+//
+// Markers are considered associated with a particular field or type if they
+// exist in the Godoc, or the closest non-godoc comment. Any other markers not
+// inside some other block (e.g. a struct definition, interface definition,
+// etc.) are considered package level.
Markers in a "closest non-Go comment +// block" may also be considered package level if registered as such and no +// identical type-level definition exists. +// +// Like loader.Package, Collector's methods are idempotent and will not +// reperform work. +// +// Traversal +// +// EachType function iterates over each type in a Package, providing +// conveniently structured type and field information with marker values +// associated. +// +// PackageMarkers can be used to fetch just package-level markers. +package markers diff --git a/pkg/markers/markers_suite_test.go b/pkg/markers/markers_suite_test.go new file mode 100644 index 000000000..0367573ff --- /dev/null +++ b/pkg/markers/markers_suite_test.go @@ -0,0 +1,126 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + pkgstest "golang.org/x/tools/go/packages/packagestest" + "sigs.k8s.io/controller-tools/pkg/loader" + testloader "sigs.k8s.io/controller-tools/pkg/loader/testutils" +) + +func TestMarkers(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Markers Suite") +} + +// do this once because it creates files + +var fakePkg *loader.Package +var exported *pkgstest.Exported + +var _ = BeforeSuite(func() { + modules := []pkgstest.Module{ + { + Name: "sigs.k8s.io/controller-tools/pkg/markers/testdata", + Files: map[string]interface{}{ + "file.go": ` + package testdata + + import ( + // nothing here should be parsed + + // +testing:pkglvl="not here import 1" + + // +testing:pkglvl="not here import 2" + foo "fmt" // +testing:pkglvl="not here import 3" + + // +testing:pkglvl="not here import 4" + ) + + // +testing:pkglvl="here unattached" + + // +testing:pkglvl="here reassociated" + // +testing:typelvl="here before type" + // +testing:eitherlvl="here not reassociated" + // +testing:fieldlvl="not here not near field" + + // normal godoc + // +testing:pkglvl="not here godoc" + // +testing:typelvl="here on type" + // normal godoc + type Foo struct { + // +testing:pkglvl="not here in struct" + // +testing:fieldlvl="here before godoc" + + // normal godoc + // +testing:fieldlvl="here in godoc" + // +testing:pkglvl="not here godoc" + // normal godoc + WithGodoc string // +testing:fieldlvl="not here after field" + + // +testing:fieldlvl="here without godoc" + + WithoutGodoc int + } // +testing:pkglvl="not here after type" + + // +testing:pkglvl="not here on var" + var ( + // +testing:pkglvl="not here in var" + Bar = "foo" + ) + + type Baz interface { + // +testing:pkglvl="not here in interface" + } + + // +testing:typelvl="not here beyond closest" + + // +testing:typelvl="here without godoc" + // +testing:pkglvl="here reassociated no godoc" + + type Quux string + + // +testing:pkglvl="here at end after last node" + + /* +testing:pkglvl="not here in block" */ + + // +testing:typelvl="here on typedecl with no more" + type Cheese struct { } + `, + }, + }, + } + + By("setting up the fake packages") + var pkgs 
+	var err error
+	pkgs, exported, err = testloader.LoadFakeRoots(pkgstest.Modules, modules, "sigs.k8s.io/controller-tools/pkg/markers/testdata")
+	Expect(err).NotTo(HaveOccurred())
+	Expect(pkgs).To(HaveLen(1))
+
+	fakePkg = pkgs[0]
+})
+
+var _ = AfterSuite(func() {
+	By("cleaning up the fake packages")
+	exported.Cleanup()
+})
diff --git a/pkg/markers/parse.go b/pkg/markers/parse.go
new file mode 100644
index 000000000..fb5aa3ba1
--- /dev/null
+++ b/pkg/markers/parse.go
@@ -0,0 +1,819 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	sc "text/scanner"
+	"unicode"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// expect checks that the next token of the scanner is the given token, adding an error
+// to the scanner if not. It returns whether the token was as expected.
+func expect(scanner *sc.Scanner, expected rune, errDesc string) bool {
+	tok := scanner.Scan()
+	if tok != expected {
+		scanner.Error(scanner, fmt.Sprintf("expected %s, got %q", errDesc, scanner.TokenText()))
+		return false
+	}
+	return true
+}
+
+var (
+	// interfaceType is a pre-computed reflect.Type representing the empty interface.
+	interfaceType = reflect.TypeOf((*interface{})(nil)).Elem()
+	rawArgsType   = reflect.TypeOf((*RawArguments)(nil)).Elem()
+)
+
+// lowerCamelCase converts a PascalCase string to
+// a camelCase string (by lowering the first rune).
+func lowerCamelCase(in string) string {
+	isFirst := true
+	return strings.Map(func(inRune rune) rune {
+		if isFirst {
+			isFirst = false
+			return unicode.ToLower(inRune)
+		}
+		return inRune
+	}, in)
+}
+
+// RawArguments is a special type that a marker can use to receive
+// *all* of its raw, unparsed argument data.
+// You probably want to use `interface{}` to match any type instead.
+// Use *only* for legacy markers that don't follow Definition's normal
+// parsing logic. It should *not* be used as a field in a marker struct.
+type RawArguments []byte
+
+// ArgumentType is the kind of a marker argument type.
+// It's roughly analogous to a subset of reflect.Kind, with
+// an extra "AnyType" to represent the empty interface.
+type ArgumentType int
+
+const (
+	// InvalidType represents a type that can't be parsed; it should never be used.
+	InvalidType ArgumentType = iota
+	// IntType is an int
+	IntType
+	// StringType is a string
+	StringType
+	// BoolType is a bool
+	BoolType
+	// AnyType is the empty interface, and matches the rest of the content
+	AnyType
+	// SliceType is any slice constructed of the ArgumentTypes
+	SliceType
+	// RawType represents content that gets passed directly to the marker
+	// without any parsing. It should *only* be used with anonymous markers.
+	RawType
+)
+
+// Argument is the type of a marker argument.
+type Argument struct {
+	Type     ArgumentType
+	Optional bool
+	Pointer  bool
+
+	ItemType *Argument
+}
+
+// typeString contains the internals of TypeString.
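+// For example, a slice of optional ints (a hypothetical argument, for
+// illustration) renders as follows:
+//
+//	arg := Argument{Type: SliceType, ItemType: &Argument{Type: IntType, Pointer: true}}
+//	arg.TypeString() // "[]*int"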
+func (a Argument) typeString(out *strings.Builder) {
+	if a.Pointer {
+		out.WriteRune('*')
+	}
+
+	switch a.Type {
+	case InvalidType:
+		out.WriteString("<invalid>")
+	case IntType:
+		out.WriteString("int")
+	case StringType:
+		out.WriteString("string")
+	case BoolType:
+		out.WriteString("bool")
+	case AnyType:
+		out.WriteString("<any>")
+	case SliceType:
+		out.WriteString("[]")
+		// arguments can't be non-pointer optional, so just call into typeString again.
+		a.ItemType.typeString(out)
+	case RawType:
+		out.WriteString("<raw>")
+	}
+}
+
+// TypeString returns a string roughly equivalent
+// (but not identical) to the underlying Go type that
+// this argument would parse to. It's mainly useful
+// for user-friendly formatting of this argument (e.g.
+// help strings).
+func (a Argument) TypeString() string {
+	out := &strings.Builder{}
+	a.typeString(out)
+	return out.String()
+}
+
+func (a Argument) String() string {
+	if a.Optional {
+		return fmt.Sprintf("<optional arg %s>", a.TypeString())
+	}
+	return fmt.Sprintf("<arg %s>", a.TypeString())
+}
+
+// castAndSet casts val to out's type if needed,
+// then sets out to val.
+func castAndSet(out, val reflect.Value) {
+	outType := out.Type()
+	if outType != val.Type() {
+		val = val.Convert(outType)
+	}
+	out.Set(val)
+}
+
+// makeSliceType makes a reflect.Type for a slice of the given type.
+// Useful for constructing the out value for when AnyType's guess returns a slice.
+func makeSliceType(itemType Argument) (reflect.Type, error) {
+	var itemReflectedType reflect.Type
+	switch itemType.Type {
+	case IntType:
+		itemReflectedType = reflect.TypeOf(int(0))
+	case StringType:
+		itemReflectedType = reflect.TypeOf("")
+	case BoolType:
+		itemReflectedType = reflect.TypeOf(false)
+	case SliceType:
+		subItemType, err := makeSliceType(*itemType.ItemType)
+		if err != nil {
+			return nil, err
+		}
+		itemReflectedType = subItemType
+	default:
+		return nil, fmt.Errorf("invalid type when constructing guessed slice out: %v", itemType.Type)
+	}
+
+	if itemType.Pointer {
+		itemReflectedType = reflect.PtrTo(itemReflectedType)
+	}
+
+	return reflect.SliceOf(itemReflectedType), nil
+}
+
+// guessType takes an educated guess about the type of the next field. If allowSlice
+// is false, it will not guess slices. It's less efficient than parsing with actual
+// type information, since we need to allocate to peek ahead full tokens, and the scanner
+// only allows peeking ahead one character.
+func guessType(scanner *sc.Scanner, raw string, allowSlice bool) *Argument {
+	if allowSlice {
+		maybeItem := guessType(scanner, raw, false)
+
+		subRaw := raw[scanner.Pos().Offset:]
+		subScanner := parserScanner(subRaw, scanner.Error)
+
+		var tok rune
+		for tok = subScanner.Scan(); tok != ',' && tok != sc.EOF && tok != ';'; tok = subScanner.Scan() {
+			// wait till we get something interesting
+		}
+
+		// semicolon means it's a legacy slice
+		if tok == ';' {
+			return &Argument{
+				Type:     SliceType,
+				ItemType: maybeItem,
+			}
+		}
+
+		return maybeItem
+	}
+
+	// first, try the easy case -- quoted strings
+	hint := scanner.Peek()
+	switch hint {
+	case '"', '\'', '`':
+		return &Argument{Type: StringType}
+	}
+
+	// everything else needs a duplicate scanner to scan properly
+	// (so we don't consume our scanner tokens until we actually
+	// go to use this -- Go doesn't like scanners that can be rewound).
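+	// (The throwaway sub-scanner below re-reads the remaining raw input
+	// from the current offset, so peeking at whole tokens here never
+	// advances the real scanner that the actual parse will consume.)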
+	subRaw := raw[scanner.Pos().Offset:]
+	subScanner := parserScanner(subRaw, scanner.Error)
+
+	// next, check for slices
+	if hint == '{' {
+		subScanner.Scan()
+		return &Argument{
+			Type:     SliceType,
+			ItemType: guessType(subScanner, subRaw, false),
+		}
+	}
+
+	// then, bools...
+	probablyString := false
+	if hint == 't' || hint == 'f' {
+		// maybe a bool
+		if nextTok := subScanner.Scan(); nextTok == sc.Ident {
+			switch subScanner.TokenText() {
+			case "true", "false":
+				// definitely a bool
+				return &Argument{Type: BoolType}
+			}
+			// probably a string
+			probablyString = true
+		} else {
+			// we shouldn't ever get here
+			scanner.Error(scanner, fmt.Sprintf("got a token (%q) that looked like an ident, but was not", scanner.TokenText()))
+			return &Argument{Type: InvalidType}
+		}
+	}
+
+	if !probablyString {
+		if nextTok := subScanner.Scan(); nextTok == sc.Int {
+			return &Argument{Type: IntType}
+		}
+	}
+
+	// otherwise assume bare strings
+	return &Argument{Type: StringType}
+}
+
+// parseString parses either of the two accepted string forms (quoted, or bare tokens).
+func (a *Argument) parseString(scanner *sc.Scanner, raw string, out reflect.Value) {
+	// strings are a bit weird -- the "easy" case is quoted strings (tokenized as strings),
+	// the "hard" case (present for backwards compat) is a bare sequence of tokens that aren't
+	// a comma.
+	tok := scanner.Scan()
+	if tok == sc.String || tok == sc.RawString {
+		// the easy case
+		val, err := strconv.Unquote(scanner.TokenText())
+		if err != nil {
+			scanner.Error(scanner, fmt.Sprintf("unable to parse string: %v", err))
+			return
+		}
+		castAndSet(out, reflect.ValueOf(val))
+		return
+	}
+
+	// the "hard" case -- bare tokens not including ',' (the argument
+	// separator), ';' (the slice separator), or '}' (delimited slice
+	// ender)
+	startPos := scanner.Position.Offset
+	for hint := scanner.Peek(); hint != ',' && hint != ';' && hint != '}' && hint != sc.EOF; hint = scanner.Peek() {
+		// skip this token
+		scanner.Scan()
+	}
+	endPos := scanner.Position.Offset + len(scanner.TokenText())
+	castAndSet(out, reflect.ValueOf(raw[startPos:endPos]))
+}
+
+// parseSlice parses either of the two slice forms (curly-brace-delimited and semicolon-separated).
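+// For example, with a hypothetical string-slice marker, both
+//
+//	+testing:slice={hi,hello}
+//	+testing:slice=hi;hello
+//
+// yield the same two-element list.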
+func (a *Argument) parseSlice(scanner *sc.Scanner, raw string, out reflect.Value) {
+	// slices have two supported formats, like string:
+	// - `{val, val, val}` (preferred)
+	// - `val;val;val` (legacy)
+	resSlice := reflect.Zero(out.Type())
+	elem := reflect.Indirect(reflect.New(out.Type().Elem()))
+
+	// preferred case
+	if scanner.Peek() == '{' {
+		// NB(directxman12): supporting delimited slices in bare slices
+		// would require an extra look-ahead here :-/
+
+		scanner.Scan() // skip '{'
+		for hint := scanner.Peek(); hint != '}' && hint != sc.EOF; hint = scanner.Peek() {
+			a.ItemType.parse(scanner, raw, elem, true)
+			resSlice = reflect.Append(resSlice, elem)
+			tok := scanner.Peek()
+			if tok == '}' {
+				break
+			}
+			if !expect(scanner, ',', "comma") {
+				return
+			}
+		}
+		if !expect(scanner, '}', "close curly brace") {
+			return
+		}
+		castAndSet(out, resSlice)
+		return
+	}
+
+	// legacy case
+	for hint := scanner.Peek(); hint != ',' && hint != '}' && hint != sc.EOF; hint = scanner.Peek() {
+		a.ItemType.parse(scanner, raw, elem, true)
+		resSlice = reflect.Append(resSlice, elem)
+		tok := scanner.Peek()
+		if tok == ',' || tok == '}' || tok == sc.EOF {
+			break
+		}
+		scanner.Scan()
+		if tok != ';' {
+			scanner.Error(scanner, fmt.Sprintf("expected semicolon, got %q", scanner.TokenText()))
+			return
+		}
+	}
+	castAndSet(out, resSlice)
+}
+
+// parse functions like Parse, except that it allows passing down whether or not we're
+// already in a slice, to avoid duplicate legacy slice detection for AnyType
+func (a *Argument) parse(scanner *sc.Scanner, raw string, out reflect.Value, inSlice bool) {
+	// nolint:gocyclo
+	if a.Type == InvalidType {
+		scanner.Error(scanner, "cannot parse invalid type")
+		return
+	}
+	if a.Pointer {
+		out.Set(reflect.New(out.Type().Elem()))
+		out = reflect.Indirect(out)
+	}
+	switch a.Type {
+	case RawType:
+		// raw consumes everything else
+		castAndSet(out, reflect.ValueOf(raw[scanner.Pos().Offset:]))
+		// consume everything else
+		for tok := scanner.Scan(); tok != sc.EOF; tok = scanner.Scan() {
+		}
+	case IntType:
+		if !expect(scanner, sc.Int, "integer") {
+			return
+		}
+		// TODO(directxman12): respect the size when parsing
+		val, err := strconv.Atoi(scanner.TokenText())
+		if err != nil {
+			scanner.Error(scanner, fmt.Sprintf("unable to parse integer: %v", err))
+			return
+		}
+		castAndSet(out, reflect.ValueOf(val))
+	case StringType:
+		// strings are a bit weird -- the "easy" case is quoted strings (tokenized as strings),
+		// the "hard" case (present for backwards compat) is a bare sequence of tokens that aren't
+		// a comma.
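+		// (e.g. a hypothetical `str="with, commas"` uses the quoted form,
+		// while `str=plain text` is consumed bare up to the next comma)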
+ a.parseString(scanner, raw, out) + case BoolType: + if !expect(scanner, sc.Ident, "true or false") { + return + } + switch scanner.TokenText() { + case "true": + castAndSet(out, reflect.ValueOf(true)) + case "false": + castAndSet(out, reflect.ValueOf(false)) + default: + scanner.Error(scanner, fmt.Sprintf("expected true or false, got %q", scanner.TokenText())) + return + } + case AnyType: + guessedType := guessType(scanner, raw, !inSlice) + newOut := out + if guessedType.Type == SliceType { + // we need to be able to construct the right element types, below + // in parse, so construct a concretely-typed value to use as "out" + newType, err := makeSliceType(*guessedType.ItemType) + if err != nil { + scanner.Error(scanner, err.Error()) + return + } + newOut = reflect.Indirect(reflect.New(newType)) + } + if !newOut.CanSet() { + panic("at the disco") + } + guessedType.Parse(scanner, raw, newOut) + castAndSet(out, newOut) + case SliceType: + // slices have two supported formats, like string: + // - `{val, val, val}` (preferred) + // - `val;val;val` (legacy) + a.parseSlice(scanner, raw, out) + } +} + +// Parse attempts to consume the argument from the given scanner (based on the given +// raw input as well for collecting ranges of content), and places the output value +// in the given reflect.Value. Errors are reported via the given scanner. +func (a *Argument) Parse(scanner *sc.Scanner, raw string, out reflect.Value) { + a.parse(scanner, raw, out, false) +} + +// ArgumentFromType constructs an Argument by examining the given +// raw reflect.Type. It can construct arguments from the Go types +// corresponding to any of the types listed in ArgumentType. +func ArgumentFromType(rawType reflect.Type) (Argument, error) { + if rawType == rawArgsType { + return Argument{ + Type: RawType, + }, nil + } + + if rawType == interfaceType { + return Argument{ + Type: AnyType, + }, nil + } + + arg := Argument{} + if rawType.Kind() == reflect.Ptr { + rawType = rawType.Elem() + arg.Pointer = true + arg.Optional = true + } + + switch rawType.Kind() { + case reflect.String: + arg.Type = StringType + case reflect.Int, reflect.Int32: // NB(directxman12): all ints in kubernetes are int32, so explicitly support that + arg.Type = IntType + case reflect.Bool: + arg.Type = BoolType + case reflect.Slice: + arg.Type = SliceType + itemType, err := ArgumentFromType(rawType.Elem()) + if err != nil { + return Argument{}, fmt.Errorf("bad slice item type: %v", err) + } + arg.ItemType = &itemType + default: + return Argument{}, fmt.Errorf("type has unsupported kind %s", rawType.Kind()) + } + + return arg, nil +} + +// TargetType describes which kind of node a given marker is associated with. +type TargetType int + +const ( + // DescribesPackage indicates that a marker is associated with a package. + DescribesPackage TargetType = iota + // DescribesType indicates that a marker is associated with a type declaration. + DescribesType + // DescribesField indicates that a marker is associated with a struct field. + DescribesField +) + +// Definition is a parsed definition of a marker. +type Definition struct { + // Output is the deserialized Go type of the marker. + Output reflect.Type + // Name is the marker's name. + Name string + // Target indicates which kind of node this marker can be associated with. + Target TargetType + // Fields lists out the types of each field that this marker has, by + // argument name as used in the marker (if the output type isn't a struct, + // it'll have a single, blank field name). 
This only lists exported fields
+	// (as per Go's reflection rules).
+	Fields map[string]Argument
+	// FieldNames maps argument names (as used in the marker) to struct field
+	// names in the output type.
+	FieldNames map[string]string
+	// Strict indicates that this definition should error out when parsing if
+	// not all non-optional fields were seen.
+	Strict bool
+}
+
+// AnonymousField indicates that the definition has a single field
+// (the output object itself), and thus that field
+// doesn't get named as part of the name.
+func (d *Definition) AnonymousField() bool {
+	if len(d.Fields) != 1 {
+		return false
+	}
+	_, hasAnonField := d.Fields[""]
+	return hasAnonField
+}
+
+// Empty indicates that this definition has no fields.
+func (d *Definition) Empty() bool {
+	return len(d.Fields) == 0
+}
+
+// loadFields uses reflection to populate argument information from the Output type.
+func (d *Definition) loadFields() error {
+	if d.Fields == nil {
+		d.Fields = make(map[string]Argument)
+		d.FieldNames = make(map[string]string)
+	}
+	if d.Output.Kind() != reflect.Struct {
+		// anonymous field type
+		argType, err := ArgumentFromType(d.Output)
+		if err != nil {
+			return err
+		}
+		d.Fields[""] = argType
+		d.FieldNames[""] = ""
+		return nil
+	}
+
+	for i := 0; i < d.Output.NumField(); i++ {
+		field := d.Output.Field(i)
+		if field.PkgPath != "" {
+			// as per the reflect package docs, PkgPath is empty for exported fields,
+			// so a non-empty package path means a private field, which we should skip
+			continue
+		}
+		argName := lowerCamelCase(field.Name)
+		markerTag, tagSpecified := field.Tag.Lookup("marker")
+		markerTagParts := strings.Split(markerTag, ",")
+
+		if tagSpecified && markerTagParts[0] != "" {
+			// allow overriding to support legacy cases where we don't follow camelCase conventions
+			argName = markerTagParts[0]
+		}
+
+		argType, err := ArgumentFromType(field.Type)
+		if err != nil {
+			return fmt.Errorf("unable to extract type information for field %q: %v", field.Name, err)
+		}
+
+		if argType.Type == RawType {
+			return fmt.Errorf("RawArguments must be the direct type of a marker, and not a field")
+		}
+
+		for _, tagOption := range markerTagParts[1:] {
+			switch tagOption {
+			case "optional":
+				argType.Optional = true
+			}
+		}
+
+		d.Fields[argName] = argType
+		d.FieldNames[argName] = field.Name
+	}
+
+	return nil
+}
+
+// parserScanner makes a new scanner appropriate for use in parsing definitions and arguments.
+func parserScanner(raw string, err func(*sc.Scanner, string)) *sc.Scanner {
+	scanner := &sc.Scanner{}
+	scanner.Init(bytes.NewBufferString(raw))
+	scanner.Mode = sc.ScanIdents | sc.ScanInts | sc.ScanStrings | sc.ScanRawStrings | sc.SkipComments
+	scanner.Error = err
+
+	return scanner
+}
+
+// Parse uses the type information in this Definition to parse the given
+// raw marker in the form `+a:b:c=arg,d=arg` into an output object of the
+// type specified in the definition.
+func (d *Definition) Parse(rawMarker string) (interface{}, error) {
+	name, anonName, fields := splitMarker(rawMarker)
+
+	out := reflect.Indirect(reflect.New(d.Output))
+
+	// if we're not a struct or have no arguments, treat the full `a:b:c` as the name,
+	// otherwise, treat `c` as a field name, and `a:b` as the marker name.
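+	// (e.g. a hypothetical struct-valued `+a:b:c=v` would be re-parsed as
+	// marker `a:b` with the field argument `c=v`)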
+ if !d.AnonymousField() && !d.Empty() && len(anonName) >= len(name)+1 { + fields = anonName[len(name)+1:] + "=" + fields + } + + var errs []error + scanner := parserScanner(fields, func(_ *sc.Scanner, msg string) { + errs = append(errs, errors.New(msg)) + }) + + // TODO(directxman12): strict parsing where we error out if certain fields aren't optional + seen := make(map[string]struct{}, len(d.Fields)) + if d.AnonymousField() && scanner.Peek() != sc.EOF { + // no need for trying to parse field names if we're not a struct + field := d.Fields[""] + field.Parse(scanner, fields, out) + seen[""] = struct{}{} // mark as seen for strict definitions + } else if !d.Empty() && scanner.Peek() != sc.EOF { + // if we expect *and* actually have arguments passed + for { + // parse the argument name + if !expect(scanner, sc.Ident, "argument name") { + break + } + argName := scanner.TokenText() + if !expect(scanner, '=', "equals") { + break + } + + // make sure we know the field + fieldName, known := d.FieldNames[argName] + if !known { + scanner.Error(scanner, fmt.Sprintf("unknown argument %q", argName)) + break + } + fieldType, known := d.Fields[argName] + if !known { + scanner.Error(scanner, fmt.Sprintf("unknown argument %q", argName)) + break + } + seen[argName] = struct{}{} // mark as seen for strict definitions + + // parse the field value + fieldVal := out.FieldByName(fieldName) + if !fieldVal.CanSet() { + scanner.Error(scanner, fmt.Sprintf("cannot set field %q (might not exist)", fieldName)) + break + } + fieldType.Parse(scanner, fields, fieldVal) + + if len(errs) > 0 { + break + } + + if scanner.Peek() == sc.EOF { + break + } + if !expect(scanner, ',', "comma") { + break + } + } + } + + if tok := scanner.Scan(); tok != sc.EOF { + scanner.Error(scanner, fmt.Sprintf("extra arguments provided: %q", fields[scanner.Position.Offset:])) + } + + if d.Strict { + for argName, arg := range d.Fields { + if _, wasSeen := seen[argName]; !wasSeen && !arg.Optional { + scanner.Error(scanner, fmt.Sprintf("missing argument %q", argName)) + } + } + } + + return out.Interface(), loader.MaybeErrList(errs) +} + +// MakeDefinition constructs a definition from a name, type, and the output type. +// All such definitions are strict by default. If a struct is passed as the output +// type, its public fields will automatically be populated into Fields (and similar +// fields in Definition). Other values will have a single, empty-string-named Fields +// entry. +func MakeDefinition(name string, target TargetType, output interface{}) (*Definition, error) { + def := &Definition{ + Name: name, + Target: target, + Output: reflect.TypeOf(output), + Strict: true, + } + + if err := def.loadFields(); err != nil { + return nil, err + } + + return def, nil +} + +// splitMarker takes a marker in the form of `+a:b:c=arg,d=arg` and splits it +// into the name (`a:b`), the name if it's not a struct (`a:b:c`), and the parts +// that are definitely fields (`arg,d=arg`). 
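+// For example:
+//
+//	splitMarker("+a:b:c=arg,d=arg") // returns ("a:b", "a:b:c", "arg,d=arg")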
+func splitMarker(raw string) (name string, anonymousName string, restFields string) {
+	raw = raw[1:] // get rid of the leading '+'
+	nameFieldParts := strings.SplitN(raw, "=", 2)
+	if len(nameFieldParts) == 1 {
+		return nameFieldParts[0], nameFieldParts[0], ""
+	}
+	anonymousName = nameFieldParts[0]
+	name = anonymousName
+	restFields = nameFieldParts[1]
+
+	nameParts := strings.Split(name, ":")
+	if len(nameParts) > 1 {
+		name = strings.Join(nameParts[:len(nameParts)-1], ":")
+	}
+	return name, anonymousName, restFields
+}
+
+// Registry keeps track of registered definitions, and allows for easy lookup.
+// It's thread-safe, and the zero-value can be safely used.
+type Registry struct {
+	forPkg   map[string]*Definition
+	forType  map[string]*Definition
+	forField map[string]*Definition
+
+	mu       sync.RWMutex
+	initOnce sync.Once
+}
+
+func (r *Registry) init() {
+	r.initOnce.Do(func() {
+		if r.forPkg == nil {
+			r.forPkg = make(map[string]*Definition)
+		}
+		if r.forType == nil {
+			r.forType = make(map[string]*Definition)
+		}
+		if r.forField == nil {
+			r.forField = make(map[string]*Definition)
+		}
+	})
+}
+
+// Define defines a new marker with the given name, target, and output type.
+// It's a shortcut around
+// r.Register(MakeDefinition(name, target, obj))
+func (r *Registry) Define(name string, target TargetType, obj interface{}) error {
+	def, err := MakeDefinition(name, target, obj)
+	if err != nil {
+		return err
+	}
+	return r.Register(def)
+}
+
+// Register registers the given marker definition with this registry for later lookup.
+func (r *Registry) Register(def *Definition) error {
+	r.init()
+
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	switch def.Target {
+	case DescribesPackage:
+		r.forPkg[def.Name] = def
+	case DescribesType:
+		r.forType[def.Name] = def
+	case DescribesField:
+		r.forField[def.Name] = def
+	default:
+		return fmt.Errorf("unknown target type %v", def.Target)
+	}
+	return nil
+}
+
+// Lookup fetches the definition corresponding to the given name and target type.
+func (r *Registry) Lookup(name string, target TargetType) *Definition {
+	r.init()
+
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	switch target {
+	case DescribesPackage:
+		return tryAnonLookup(name, r.forPkg)
+	case DescribesType:
+		return tryAnonLookup(name, r.forType)
+	case DescribesField:
+		return tryAnonLookup(name, r.forField)
+	default:
+		return nil
+	}
+}
+
+// AllDefinitions returns all marker definitions known to this registry.
+func (r *Registry) AllDefinitions() []*Definition {
+	res := make([]*Definition, 0, len(r.forPkg)+len(r.forType)+len(r.forField))
+	for _, def := range r.forPkg {
+		res = append(res, def)
+	}
+	for _, def := range r.forType {
+		res = append(res, def)
+	}
+	for _, def := range r.forField {
+		res = append(res, def)
+	}
+	return res
+}
+
+// tryAnonLookup tries looking up the given marker as both a struct-based
+// marker and an anonymous marker, returning whichever format matches first,
+// preferring the longer (anonymous) name in case of conflicts.
+func tryAnonLookup(name string, defs map[string]*Definition) *Definition {
+	// NB(directxman12): we look up anonymous names first to work with
+	// legacy style marker definitions that have a namespaced approach
+	// (e.g. deepcopy-gen, which uses `+k8s:deepcopy-gen=foo,bar` *and*
+	// `+k8s.io:deepcopy-gen:interfaces=foo`).
+	name, anonName, _ := splitMarker(name)
+	if def, exists := defs[anonName]; exists {
+		return def
+	}
+
+	return defs[name]
+}
+
+// Must panics on errors creating definitions.
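+// It's a convenience for wrapping MakeDefinition in package-level variable
+// declarations, e.g. (a hypothetical marker name):
+//
+//	var myDef = Must(MakeDefinition("example:marker", DescribesPackage, ""))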
+func Must(def *Definition, err error) *Definition { + if err != nil { + panic(err) + } + return def +} diff --git a/pkg/markers/parse_test.go b/pkg/markers/parse_test.go new file mode 100644 index 000000000..d46c63cd6 --- /dev/null +++ b/pkg/markers/parse_test.go @@ -0,0 +1,244 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers_test + +import ( + "bytes" + "reflect" + sc "text/scanner" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "sigs.k8s.io/controller-tools/pkg/markers" +) + +func mustDefine(reg *Registry, name string, target TargetType, obj interface{}) { + Expect(reg.Define(name, target, obj)).To(Succeed()) +} + +type wrappedMarkerVal string +type multiFieldStruct struct { + Str string + Int int + Bool bool + Any interface{} + PtrOpt *string + NormalOpt string `marker:",optional"` + DiffNamed string `marker:"other"` + BothTags string `marker:"both,optional"` + Slice []int + SliceOfSlice [][]int +} + +type allOptionalStruct struct { + OptStr string `marker:",optional"` + OptInt *int +} + +var _ = Describe("Parsing", func() { + var reg *Registry + + Context("of full markers", func() { + BeforeEach(func() { + reg = &Registry{} + + mustDefine(reg, "testing:empty", DescribesPackage, struct{}{}) + mustDefine(reg, "testing:anonymous:literal", DescribesPackage, "") + mustDefine(reg, "testing:anonymous:named", DescribesPackage, wrappedMarkerVal("")) + mustDefine(reg, "testing:raw", DescribesPackage, RawArguments("")) + mustDefine(reg, "testing:multiField", DescribesPackage, multiFieldStruct{}) + mustDefine(reg, "testing:allOptional", DescribesPackage, allOptionalStruct{}) + mustDefine(reg, "testing:anonymousOptional", DescribesPackage, (*int)(nil)) + mustDefine(reg, "testing:multi:segment", DescribesPackage, 0) + mustDefine(reg, "testing:parent", DescribesPackage, allOptionalStruct{}) + mustDefine(reg, "testing:parent:nested", DescribesPackage, "") + mustDefine(reg, "testing:tripleDefined", DescribesPackage, 0) + mustDefine(reg, "testing:tripleDefined", DescribesField, "") + mustDefine(reg, "testing:tripleDefined", DescribesType, false) + }) + + It("should parse name-only markers", parseTestCase{reg: ®, raw: "+testing:empty", output: struct{}{}}.Run) + + Context("when parsing anonymous markers", func() { + It("should parse into literal-typed values", parseTestCase{reg: ®, raw: "+testing:anonymous:literal=foo", output: "foo"}.Run) + It("should parse into named-typed values", parseTestCase{reg: ®, raw: "+testing:anonymous:named=foo", output: wrappedMarkerVal("foo")}.Run) + It("shouldn't require any argument to an optional-valued marker", parseTestCase{reg: ®, raw: "+testing:anonymousOptional", output: (*int)(nil)}.Run) + }) + + It("should parse raw-argument markers", parseTestCase{reg: ®, raw: "+testing:raw=this;totally,doesn't;get,parsed", output: RawArguments("this;totally,doesn't;get,parsed")}.Run) + + Context("when parsing multi-field markers", func() { + definitelyAString := "optional string" + It("should support parsing 
multiple fields with the appropriate names", parseTestCase{ + reg: ®, + raw: `+testing:multiField:str=some str,int=42,bool=true,any=21,ptrOpt="optional string",normalOpt="other string",other="yet another",both="and one more",slice=99;104;101;101;115;101,sliceOfSlice={{1,1},{2,3},{5,8}}`, + output: multiFieldStruct{ + Str: "some str", + Bool: true, + Any: 21, + PtrOpt: &definitelyAString, + NormalOpt: "other string", + DiffNamed: "yet another", + BothTags: "and one more", + Slice: []int{99, 104, 101, 101, 115, 101}, + SliceOfSlice: [][]int{{1, 1}, {2, 3}, {5, 8}}, + Int: 42, + }, + }.Run) + + It("shouldn't matter what order the fields are specified in", parseTestCase{ + reg: ®, + // just some random order with everything out of order + raw: `+testing:multiField:int=42,str=some str,any=21,bool=true,any=21,sliceOfSlice={{1,1},{2,3},{5,8}},slice=99;104;101;101;115;101,other="yet another"`, + output: multiFieldStruct{ + Str: "some str", + Bool: true, + Any: 21, + DiffNamed: "yet another", + Slice: []int{99, 104, 101, 101, 115, 101}, + SliceOfSlice: [][]int{{1, 1}, {2, 3}, {5, 8}}, + Int: 42, + }, + }.Run) + + It("should support leaving out optional fields", parseTestCase{ + reg: ®, + raw: `+testing:multiField:str=some str,bool=true,any=21,other="yet another",slice=99;104;101;101;115;101,sliceOfSlice={{1,1},{2,3},{5,8}},int=42`, + output: multiFieldStruct{ + Str: "some str", + Bool: true, + Any: 21, + DiffNamed: "yet another", + Slice: []int{99, 104, 101, 101, 115, 101}, + SliceOfSlice: [][]int{{1, 1}, {2, 3}, {5, 8}}, + Int: 42, + }, + }.Run) + It("should error out for missing values", func() { + By("looking up the marker definition") + raw := "+testing:multiField:str=`hi`" + defn := reg.Lookup(raw, DescribesPackage) + Expect(defn).NotTo(BeNil()) + + By("trying to parse the marker") + _, err := defn.Parse(raw) + Expect(err).To(HaveOccurred()) + }) + It("shouldn't require any arguments to an optional-valued marker", parseTestCase{reg: ®, raw: "+testing:allOptional", output: allOptionalStruct{}}.Run) + }) + + It("should support markers with multiple segments in the name", parseTestCase{reg: ®, raw: "+testing:multi:segment=42", output: 42}.Run) + + Context("when dealing with disambiguating anonymous markers", func() { + It("should favor the shorter-named one", parseTestCase{reg: ®, raw: "+testing:parent", output: allOptionalStruct{}}.Run) + It("should still allow fetching the longer-named one", parseTestCase{reg: ®, raw: "+testing:parent:nested=some string", output: "some string"}.Run) + It("should consider anonymously-named ones before considering fields", parseTestCase{reg: ®, raw: "+testing:parent:optStr=other string", output: allOptionalStruct{OptStr: "other string"}}.Run) + }) + + Context("when dealing with markers describing multiple things", func() { + It("should properly parse the package-level one", parseTestCase{reg: ®, raw: "+testing:tripleDefined=42", output: 42, target: DescribesPackage}.Run) + It("should properly parse the field-level one", parseTestCase{reg: ®, raw: "+testing:tripleDefined=foo", output: "foo", target: DescribesField}.Run) + It("should properly parse the type-level one", parseTestCase{reg: ®, raw: "+testing:tripleDefined=true", output: true, target: DescribesType}.Run) + }) + }) + + Context("of individual arguments", func() { + It("should support bare strings", argParseTestCase{arg: Argument{Type: StringType}, raw: `some string here!`, output: "some string here!"}.Run) + It("should support double-quoted strings", argParseTestCase{arg: Argument{Type: StringType}, raw: 
`"some; string, \nhere"`, output: "some; string, \nhere"}.Run)
+		It("should support raw strings", argParseTestCase{arg: Argument{Type: StringType}, raw: "`some; string, \\nhere`", output: `some; string, \nhere`}.Run)
+		It("should support integers", argParseTestCase{arg: Argument{Type: IntType}, raw: "42", output: 42}.Run)
+		XIt("should support negative integers", argParseTestCase{arg: Argument{Type: IntType}, raw: "-42", output: -42}.Run)
+		It("should support false booleans", argParseTestCase{arg: Argument{Type: BoolType}, raw: "false", output: false}.Run)
+		It("should support true booleans", argParseTestCase{arg: Argument{Type: BoolType}, raw: "true", output: true}.Run)
+
+		sliceOSlice := Argument{Type: SliceType, ItemType: &Argument{Type: SliceType, ItemType: &Argument{Type: IntType}}}
+		sliceOSliceOut := [][]int{{1, 1}, {2, 3}, {5, 8}}
+
+		It("should support bare slices", argParseTestCase{arg: Argument{Type: SliceType, ItemType: &Argument{Type: StringType}}, raw: "hi;hello;hey y'all", output: []string{"hi", "hello", "hey y'all"}}.Run)
+		It("should support delimited slices", argParseTestCase{arg: Argument{Type: SliceType, ItemType: &Argument{Type: StringType}}, raw: "{hi,hello,hey y'all}", output: []string{"hi", "hello", "hey y'all"}}.Run)
+		It("should support delimited slices of bare slices", argParseTestCase{arg: sliceOSlice, raw: "{1;1,2;3,5;8}", output: sliceOSliceOut}.Run)
+		It("should support delimited slices of delimited slices", argParseTestCase{arg: sliceOSlice, raw: "{{1,1},{2,3},{5,8}}", output: sliceOSliceOut}.Run)
+
+		Context("with any value", func() {
+			anyArg := Argument{Type: AnyType}
+			It("should support bare strings", argParseTestCase{arg: anyArg, raw: `some string here!`, output: "some string here!"}.Run)
+			It("should support double-quoted strings", argParseTestCase{arg: anyArg, raw: `"some; string, \nhere"`, output: "some; string, \nhere"}.Run)
+			It("should support raw strings", argParseTestCase{arg: anyArg, raw: "`some; string, \\nhere`", output: `some; string, \nhere`}.Run)
+			It("should support integers", argParseTestCase{arg: anyArg, raw: "42", output: 42}.Run)
+			XIt("should support negative integers", argParseTestCase{arg: anyArg, raw: "-42", output: -42}.Run)
+			It("should support false booleans", argParseTestCase{arg: anyArg, raw: "false", output: false}.Run)
+			It("should support true booleans", argParseTestCase{arg: anyArg, raw: "true", output: true}.Run)
+
+			sliceOSliceOut := [][]int{{1, 1}, {2, 3}, {5, 8}}
+
+			It("should support bare slices", argParseTestCase{arg: anyArg, raw: "hi;hello;hey y'all", output: []string{"hi", "hello", "hey y'all"}}.Run)
+			It("should support delimited slices", argParseTestCase{arg: anyArg, raw: "{hi,hello,hey y'all}", output: []string{"hi", "hello", "hey y'all"}}.Run)
+			It("should support delimited slices of bare slices", argParseTestCase{arg: anyArg, raw: "{1;1,2;3,5;8}", output: sliceOSliceOut}.Run)
+			It("should support delimited slices of delimited slices", argParseTestCase{arg: anyArg, raw: "{{1,1},{2,3},{5,8}}", output: sliceOSliceOut}.Run)
+		})
+	})
+})
+
+type parseTestCase struct {
+	reg    **Registry
+	raw    string
+	output interface{}
+	target TargetType // NB(directxman12): iota is DescribesPackage
+}
+
+func (tc parseTestCase) Run() {
+	reg := *tc.reg
+	By("looking up the marker definition")
+	defn := reg.Lookup(tc.raw, tc.target)
+	Expect(defn).NotTo(BeNil())
+
+	By("parsing the marker")
+	outVal, err := defn.Parse(tc.raw)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("checking for the expected output")
+
Expect(outVal).To(Equal(tc.output)) +} + +type argParseTestCase struct { + arg Argument + raw string + output interface{} +} + +func (tc argParseTestCase) Run() { + scanner := sc.Scanner{} + scanner.Init(bytes.NewBufferString(tc.raw)) + scanner.Mode = sc.ScanIdents | sc.ScanInts | sc.ScanStrings | sc.ScanRawStrings | sc.SkipComments + scanner.Error = func(_ *sc.Scanner, msg string) { + Fail(msg) + } + + var actualOut reflect.Value + if tc.arg.Type == AnyType { + actualOut = reflect.Indirect(reflect.New(reflect.TypeOf((*interface{})(nil)).Elem())) + } else { + actualOut = reflect.Indirect(reflect.New(reflect.TypeOf(tc.output))) + } + + By("parsing the raw argument") + tc.arg.Parse(&scanner, tc.raw, actualOut) + + By("checking that it equals the expected output") + Expect(actualOut.Interface()).To(Equal(tc.output)) +} diff --git a/pkg/markers/zip.go b/pkg/markers/zip.go new file mode 100644 index 000000000..a9b4c98af --- /dev/null +++ b/pkg/markers/zip.go @@ -0,0 +1,191 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "go/ast" + "go/token" + "reflect" + "strings" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// extractDoc extracts documentation from the given node, skipping markers +// in the godoc and falling back to the decl if necessary (for single-line decls). +func extractDoc(node ast.Node, decl *ast.GenDecl) string { + var docs *ast.CommentGroup + switch docced := node.(type) { + case *ast.Field: + docs = docced.Doc + case *ast.File: + docs = docced.Doc + case *ast.GenDecl: + docs = docced.Doc + case *ast.TypeSpec: + docs = docced.Doc + // type Ident expr expressions get docs attached to the decl, + // so check for that case (missing Lparen == single line type decl) + if docs == nil && decl.Lparen == token.NoPos { + docs = decl.Doc + } + } + + if docs == nil { + return "" + } + + // filter out markers + var outGroup ast.CommentGroup + outGroup.List = make([]*ast.Comment, 0, len(docs.List)) + for _, comment := range docs.List { + if isMarkerComment(comment.Text) { + continue + } + outGroup.List = append(outGroup.List, comment) + } + + // split lines, and re-join together as a single + // paragraph, respecting double-newlines as + // paragraph markers. + outLines := strings.Split(outGroup.Text(), "\n") + if outLines[len(outLines)-1] == "" { + // chop off the extraneous last part + outLines = outLines[:len(outLines)-1] + } + // respect double-newline meaning actual newline + for i, line := range outLines { + if line == "" { + outLines[i] = "\n" + } + } + return strings.Join(outLines, " ") +} + +// PackageMarkers collects all the package-level marker values for the given package. +func PackageMarkers(col *Collector, pkg *loader.Package) (MarkerValues, error) { + markers, err := col.MarkersInPackage(pkg) + if err != nil { + return nil, err + } + res := make(MarkerValues) + for _, file := range pkg.Syntax { + fileMarkers := markers[file] + for name, vals := range fileMarkers { + res[name] = append(res[name], vals...) 
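+			// (values for the same marker name from different files are
+			// appended together rather than overwritten)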
+		}
+	}
+
+	return res, nil
+}
+
+// FieldInfo contains marker values and commonly used information for a struct field.
+type FieldInfo struct {
+	// Name is the name of the field (or "" for embedded fields)
+	Name string
+	// Doc is the godoc of the field, pre-processed to remove markers and join
+	// single newlines together.
+	Doc string
+	// Tag is the struct tag associated with this field (or "" if none existed).
+	Tag reflect.StructTag
+
+	// Markers are all registered markers associated with this field.
+	Markers MarkerValues
+
+	// RawField is the raw, underlying field AST object that this field represents.
+	RawField *ast.Field
+}
+
+// TypeInfo contains marker values and commonly used information for a type declaration.
+type TypeInfo struct {
+	// Name is the name of the type.
+	Name string
+	// Doc is the godoc of the type, pre-processed to remove markers and join
+	// single newlines together.
+	Doc string
+
+	// Markers are all registered markers associated with the type.
+	Markers MarkerValues
+
+	// Fields are all the fields associated with the type, if it's a struct
+	// (if not, Fields will be nil).
+	Fields []FieldInfo
+
+	// RawDecl contains the raw GenDecl that the type was declared as part of.
+	RawDecl *ast.GenDecl
+	// RawSpec contains the raw Spec that declared this type.
+	RawSpec *ast.TypeSpec
+	// RawFile contains the file in which this type was declared.
+	RawFile *ast.File
+}
+
+// TypeCallback is a callback called for each type declaration in a package.
+type TypeCallback func(info *TypeInfo)
+
+// EachType collects all markers, then calls the given callback for each type declaration in a package.
+// Each individual spec is considered separate, so
+//
+//	type (
+//		Foo string
+//		Bar int
+//		Baz struct{}
+//	)
+//
+// yields three calls to the callback.
+func EachType(col *Collector, pkg *loader.Package, cb TypeCallback) error {
+	markers, err := col.MarkersInPackage(pkg)
+	if err != nil {
+		return err
+	}
+
+	loader.EachType(pkg, func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec) {
+		var fields []FieldInfo
+		if structSpec, isStruct := spec.Type.(*ast.StructType); isStruct {
+			for _, field := range structSpec.Fields.List {
+				for _, name := range field.Names {
+					fields = append(fields, FieldInfo{
+						Name:     name.Name,
+						Doc:      extractDoc(field, nil),
+						Tag:      loader.ParseAstTag(field.Tag),
+						Markers:  markers[field],
+						RawField: field,
+					})
+				}
+				if field.Names == nil {
+					fields = append(fields, FieldInfo{
+						Doc:      extractDoc(field, nil),
+						Tag:      loader.ParseAstTag(field.Tag),
+						Markers:  markers[field],
+						RawField: field,
+					})
+				}
+			}
+		}
+
+		cb(&TypeInfo{
+			Name:    spec.Name.Name,
+			Markers: markers[spec],
+			Doc:     extractDoc(spec, decl),
+			Fields:  fields,
+			RawDecl: decl,
+			RawSpec: spec,
+			RawFile: file,
+		})
+	})
+
+	return nil
+}
From 1e02d93a747c096f286cca0a74e6c5d362d9f5a4 Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Fri, 26 Apr 2019 21:18:42 -0700
Subject: [PATCH 04/14] Remove old generation and parsing logic

This removes the generation and parsing logic that's being superseded
by loader, markers, etc.
--- cmd/crd/cmd/generate.go | 72 --- cmd/crd/cmd/root.go | 44 -- cmd/crd/main.go | 23 - pkg/crd/generator/generator.go | 217 -------- pkg/crd/generator/generator_test.go | 68 --- pkg/crd/util/util.go | 130 ----- pkg/crd/v2/crd.go | 559 --------------------- pkg/crd/v2/definition_checker.go | 34 -- pkg/crd/v2/definition_pruner.go | 111 ---- pkg/crd/v2/embed.go | 92 ---- pkg/crd/v2/flattener.go | 85 ---- pkg/crd/v2/generator.go | 257 ---------- pkg/crd/v2/multiversion.go | 113 ----- pkg/crd/v2/multiversion_test.go | 165 ------ pkg/crd/v2/parser.go | 288 ----------- pkg/crd/v2/singleversion.go | 132 ----- pkg/crd/v2/singleversion_test.go | 204 -------- pkg/crd/v2/types.go | 33 -- pkg/crd/v2/utils.go | 194 ------- pkg/crd/v2/validation.go | 184 ------- pkg/crd/v2/writer.go | 104 ---- pkg/internal/codegen/parse/apis.go | 287 ----------- pkg/internal/codegen/parse/context.go | 42 -- pkg/internal/codegen/parse/crd.go | 639 ------------------------ pkg/internal/codegen/parse/index.go | 161 ------ pkg/internal/codegen/parse/parser.go | 151 ------ pkg/internal/codegen/parse/util.go | 539 -------------------- pkg/internal/codegen/parse/util_test.go | 216 -------- pkg/internal/codegen/types.go | 213 -------- pkg/internal/general/util.go | 102 ---- pkg/rbac/manifests.go | 170 ------- pkg/util/util.go | 77 --- 32 files changed, 5706 deletions(-) delete mode 100644 cmd/crd/cmd/generate.go delete mode 100644 cmd/crd/cmd/root.go delete mode 100644 cmd/crd/main.go delete mode 100644 pkg/crd/generator/generator.go delete mode 100644 pkg/crd/generator/generator_test.go delete mode 100644 pkg/crd/util/util.go delete mode 100644 pkg/crd/v2/crd.go delete mode 100644 pkg/crd/v2/definition_checker.go delete mode 100644 pkg/crd/v2/definition_pruner.go delete mode 100644 pkg/crd/v2/embed.go delete mode 100644 pkg/crd/v2/flattener.go delete mode 100644 pkg/crd/v2/generator.go delete mode 100644 pkg/crd/v2/multiversion.go delete mode 100644 pkg/crd/v2/multiversion_test.go delete mode 100644 pkg/crd/v2/parser.go delete mode 100644 pkg/crd/v2/singleversion.go delete mode 100644 pkg/crd/v2/singleversion_test.go delete mode 100644 pkg/crd/v2/types.go delete mode 100644 pkg/crd/v2/utils.go delete mode 100644 pkg/crd/v2/validation.go delete mode 100644 pkg/crd/v2/writer.go delete mode 100644 pkg/internal/codegen/parse/apis.go delete mode 100644 pkg/internal/codegen/parse/context.go delete mode 100644 pkg/internal/codegen/parse/crd.go delete mode 100644 pkg/internal/codegen/parse/index.go delete mode 100644 pkg/internal/codegen/parse/parser.go delete mode 100644 pkg/internal/codegen/parse/util.go delete mode 100644 pkg/internal/codegen/parse/util_test.go delete mode 100644 pkg/internal/codegen/types.go delete mode 100644 pkg/internal/general/util.go delete mode 100644 pkg/rbac/manifests.go delete mode 100644 pkg/util/util.go diff --git a/cmd/crd/cmd/generate.go b/cmd/crd/cmd/generate.go deleted file mode 100644 index 32c9ade73..000000000 --- a/cmd/crd/cmd/generate.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cmd - -import ( - "fmt" - "log" - - "github.com/spf13/cobra" - flag "github.com/spf13/pflag" - crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator" -) - -var g *crdgenerator.Generator - -// GenerateCmd represents the generate command -var GenerateCmd = &cobra.Command{ - Use: "generate", - Short: "Generate CRD install files for the resource definition", - Long: `Generate CRD install files for the resource definition. - -- if param domain is specified, it will take given root-path as working directory, -and take current path if root-path not specified; note that api path "pkg/apis" must -exist under the root-path. -- if param domain is not specified, then it will search parent directories of root path -for PROJECT file, take its path as working directory, and fetch domain info from the file. -`, - Example: "crd generate --domain k8s.io", - Run: func(_ *cobra.Command, _ []string) { - fmt.Println("Writing CRD files...") - if err := g.ValidateAndInitFields(); err != nil { - log.Fatal(err) - } - if err := g.Do(); err != nil { - log.Fatal(err) - } - fmt.Printf("CRD files generated, files can be found under path %s.\n", g.OutputDir) - }, -} - -func init() { - rootCmd.AddCommand(GenerateCmd) - g = GeneratorForFlags(GenerateCmd.Flags()) -} - -// GeneratorForFlags registers flags for Generator fields and returns the Generator. -func GeneratorForFlags(f *flag.FlagSet) *crdgenerator.Generator { - g := &crdgenerator.Generator{} - f.StringVar(&g.RootPath, "root-path", "", "working dir, must have PROJECT file under the path or parent path if domain not set") - f.StringVar(&g.OutputDir, "output-dir", "", "output directory, default to 'config/crds' under root path") - f.StringVar(&g.Domain, "domain", "", "domain of the resources, will try to fetch it from PROJECT file if not specified") - // TODO: Do we need this? Is there a possibility that a crd is namespace scoped? - f.StringVar(&g.Namespace, "namespace", "", "CRD namespace, treat it as root scoped if not set") - f.BoolVar(&g.SkipMapValidation, "skip-map-validation", true, "if set to true, skip generating validation schema for map type in CRD.") - f.StringVar(&g.APIsPath, "apis-path", "pkg/apis", "the path to search for apis relative to the current directory") - f.StringVar(&g.APIsPkg, "apis-pkg", "", "the absolute Go pkg name for current project's api pkg.") - return g -} diff --git a/cmd/crd/cmd/root.go b/cmd/crd/cmd/root.go deleted file mode 100644 index bedf94d03..000000000 --- a/cmd/crd/cmd/root.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" -) - -// rootCmd represents the base command when called without any subcommands -var rootCmd = &cobra.Command{ - Use: "crd", - Short: "A reference implementation tool for CRD.", - Long: `A reference implementation tool for CRD.`, - Example: ` - # Generate CRD install files - crd generate -`, -} - -// Execute adds all child commands to the root command and sets flags appropriately. -// This is called by main.main(). It only needs to happen once to the rootCmd. -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} diff --git a/cmd/crd/main.go b/cmd/crd/main.go deleted file mode 100644 index efb0c702d..000000000 --- a/cmd/crd/main.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import "sigs.k8s.io/controller-tools/cmd/crd/cmd" - -func main() { - cmd.Execute() -} diff --git a/pkg/crd/generator/generator.go b/pkg/crd/generator/generator.go deleted file mode 100644 index f8b98fd1d..000000000 --- a/pkg/crd/generator/generator.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generator - -import ( - "fmt" - "log" - "os" - "path" - "strings" - - "github.com/ghodss/yaml" - "github.com/spf13/afero" - extensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/gengo/args" - "k8s.io/gengo/types" - crdutil "sigs.k8s.io/controller-tools/pkg/crd/util" - "sigs.k8s.io/controller-tools/pkg/internal/codegen" - "sigs.k8s.io/controller-tools/pkg/internal/codegen/parse" - "sigs.k8s.io/controller-tools/pkg/util" -) - -// Generator generates CRD manifests from API resource definitions defined in Go source files. -type Generator struct { - RootPath string - OutputDir string - Repo string - Domain string - Namespace string - SkipMapValidation bool - - // OutFs is filesystem to be used for writing out the result - OutFs afero.Fs - - // apisPkg is the absolute Go pkg name for current project's 'pkg/apis' pkg. - // This is needed to determine if a Type belongs to the project or it is a referred Type. - apisPkg string - - // APIsPath and APIsPkg allow customized generation for Go types existing under directories other than pkg/apis - APIsPath string - APIsPkg string -} - -// ValidateAndInitFields validate and init generator fields. 
-func (c *Generator) ValidateAndInitFields() error { - var err error - - if c.OutFs == nil { - c.OutFs = afero.NewOsFs() - } - - if len(c.RootPath) == 0 { - // Take current path as root path if not specified. - c.RootPath, err = os.Getwd() - if err != nil { - return err - } - } - - // Validate PROJECT file if Domain or Repo are not set manually - if len(c.Domain) == 0 || len(c.Repo) == 0 { - if !crdutil.PathHasProjectFile(c.RootPath) { - return fmt.Errorf("PROJECT file missing in dir %s", c.RootPath) - } - } - - if len(c.Repo) == 0 { - c.Repo = crdutil.GetRepoFromProject(c.RootPath) - } - - // If Domain is not explicitly specified, - // try to search for PROJECT file as a basis. - if len(c.Domain) == 0 { - c.Domain = crdutil.GetDomainFromProject(c.RootPath) - } - - err = c.setAPIsPkg() - if err != nil { - return err - } - - // Init output directory - if c.OutputDir == "" { - c.OutputDir = path.Join(c.RootPath, "config/crds") - } - - return nil -} - -// Do manages CRD generation. -func (c *Generator) Do() error { - arguments := args.Default() - b, err := arguments.NewBuilder() - if err != nil { - return fmt.Errorf("failed making a parser: %v", err) - } - - // Switch working directory to root path. - wd, err := os.Getwd() - if err != nil { - return err - } - if err := os.Chdir(c.RootPath); err != nil { - return fmt.Errorf("failed switching working dir: %v", err) - } - defer func() { - if err := os.Chdir(wd); err != nil { - log.Fatalf("Failed to switch back to original working dir: %v", err) - } - }() - - if err := b.AddDirRecursive(fmt.Sprintf("%s/%s", c.Repo, c.APIsPath)); err != nil { - return fmt.Errorf("failed making a parser: %v", err) - } - - ctx, err := parse.NewContext(b) - if err != nil { - return fmt.Errorf("failed making a context: %v", err) - } - - arguments.CustomArgs = &parse.Options{SkipMapValidation: c.SkipMapValidation} - - // TODO: find an elegant way to fulfill the domain in APIs. - p := parse.NewAPIs(ctx, arguments, c.Domain, c.apisPkg) - crds := c.getCrds(p) - - return c.writeCRDs(crds) -} - -func (c *Generator) writeCRDs(crds map[string][]byte) error { - // Ensure output dir exists. - if err := c.OutFs.MkdirAll(c.OutputDir, os.FileMode(0700)); err != nil { - return err - } - - for file, crd := range crds { - outFile := path.Join(c.OutputDir, file) - if err := (&util.FileWriter{Fs: c.OutFs}).WriteFile(outFile, crd); err != nil { - return err - } - } - return nil -} - -func getCRDFileName(resource *codegen.APIResource) string { - elems := []string{resource.Group, resource.Version, strings.ToLower(resource.Kind)} - return strings.Join(elems, "_") + ".yaml" -} - -func (c *Generator) getCrds(p *parse.APIs) map[string][]byte { - crds := map[string]extensionsv1beta1.CustomResourceDefinition{} - for _, g := range p.APIs.Groups { - for _, v := range g.Versions { - for _, r := range v.Resources { - crd := r.CRD - // ignore types which do not belong to this project - if !c.belongsToAPIsPkg(r.Type) { - continue - } - if len(c.Namespace) > 0 { - crd.Namespace = c.Namespace - } - fileName := getCRDFileName(r) - crds[fileName] = crd - } - } - } - - result := map[string][]byte{} - for file, crd := range crds { - b, err := yaml.Marshal(crd) - if err != nil { - log.Fatalf("Error: %v", err) - } - result[file] = b - } - - return result -} - -// belongsToAPIsPkg returns true if type t is defined under pkg/apis pkg of -// current project. 
-func (c *Generator) belongsToAPIsPkg(t *types.Type) bool { - return strings.HasPrefix(t.Name.Package, c.apisPkg) -} - -func (c *Generator) setAPIsPkg() error { - if c.APIsPath == "" { - c.APIsPath = "pkg/apis" - } - - c.apisPkg = c.APIsPkg - if c.apisPkg == "" { - // Validate apis directory exists under working path - apisPath := path.Join(c.RootPath, c.APIsPath) - if _, err := os.Stat(apisPath); err != nil { - return fmt.Errorf("error validating apis path %s: %v", apisPath, err) - } - - c.apisPkg = path.Join(c.Repo, c.APIsPath) - } - return nil -} diff --git a/pkg/crd/generator/generator_test.go b/pkg/crd/generator/generator_test.go deleted file mode 100644 index c11ba51c8..000000000 --- a/pkg/crd/generator/generator_test.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generator_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/spf13/afero" - crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator" -) - -func TestGenerator(t *testing.T) { - currDir, err := os.Getwd() - if err != nil { - t.Fatalf("unable to get current directory: %v", err) - } - testDataDir := filepath.Join(currDir, "..", "..", "..", "testData") - // in-memory file system for storing the generated CRDs - outFs := afero.NewMemMapFs() - g := &crdgenerator.Generator{ - OutFs: outFs, - OutputDir: "/tmp", - RootPath: testDataDir, - } - err = g.ValidateAndInitFields() - if err != nil { - t.Fatalf("generator validate should have succeeded %v", err) - } - - err = g.Do() - if err != nil { - t.Fatalf("generator validate should have succeeded %v", err) - } - for _, f := range []string{"fun_v1alpha1_toy.yaml"} { - crdOuputFile := filepath.Join("/tmp", f) - crdContent, err := afero.ReadFile(outFs, crdOuputFile) - if err != nil { - t.Fatalf("reading file failed %v", err) - } - expectedContent, err := ioutil.ReadFile(filepath.Join(testDataDir, "config", "crds", f)) - if err != nil { - t.Fatalf("reading file failed %v", err) - } - if !reflect.DeepEqual(crdContent, expectedContent) { - t.Fatalf("CRD output does not match exp:%v got:%v \n", string(expectedContent), string(crdContent)) - } - } - // examine content of the in-memory filesystem - // outFs.(*afero.MemMapFs).List() -} diff --git a/pkg/crd/util/util.go b/pkg/crd/util/util.go deleted file mode 100644 index 559e60227..000000000 --- a/pkg/crd/util/util.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package util
-
-import (
-	"bufio"
-	"fmt"
-	gobuild "go/build"
-	"log"
-	"os"
-	"path"
-	"path/filepath"
-	"strings"
-)
-
-// IsGoSrcPath validates whether the given path is $GOPATH/src.
-func IsGoSrcPath(filePath string) bool {
-	for _, gopath := range getGoPaths() {
-		goSrc := path.Join(gopath, "src")
-		if filePath == goSrc {
-			return true
-		}
-	}
-
-	return false
-}
-
-// IsUnderGoSrcPath validates whether the given path is under $GOPATH/src.
-func IsUnderGoSrcPath(filePath string) bool {
-	for _, gopath := range getGoPaths() {
-		goSrc := path.Join(gopath, "src")
-		if strings.HasPrefix(filepath.Dir(filePath), goSrc) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// DirToGoPkg returns the Go package for the given directory if it exists
-// under a GOPATH, otherwise it returns an error. For example,
-// /Users/x/go/src/github.com/y/z ==> github.com/y/z
-func DirToGoPkg(dir string) (pkg string, err error) {
-	goPaths := getGoPaths()
-	for _, gopath := range goPaths {
-		goSrc := path.Join(gopath, "src")
-		if !strings.HasPrefix(dir, goSrc) {
-			continue
-		}
-		pkg, err := filepath.Rel(goSrc, dir)
-		if err == nil {
-			return pkg, err
-		}
-	}
-
-	return "", fmt.Errorf("dir '%s' does not exist under any GOPATH %v", dir, goPaths)
-}
-
-func getGoPaths() []string {
-	gopaths := os.Getenv("GOPATH")
-	if len(gopaths) == 0 {
-		gopaths = gobuild.Default.GOPATH
-	}
-	return filepath.SplitList(gopaths)
-}
-
-// PathHasProjectFile validates whether a PROJECT file exists under the path.
-func PathHasProjectFile(filePath string) bool {
-	if _, err := os.Stat(path.Join(filePath, "PROJECT")); os.IsNotExist(err) {
-		return false
-	}
-
-	return true
-}
-
-// GetDomainFromProject gets the domain from the PROJECT file under the path.
-func GetDomainFromProject(rootPath string) string {
-	return GetFieldFromProject("domain", rootPath)
-}
-
-// GetRepoFromProject gets the repo from the PROJECT file under the path.
-func GetRepoFromProject(rootPath string) string {
-	return GetFieldFromProject("repo", rootPath)
-}
-
-// GetFieldFromProject gets the value of the given field from the PROJECT file
-// under the path.
-func GetFieldFromProject(fieldKey string, rootPath string) string {
-	var fieldVal string
-
-	file, err := os.Open(path.Join(rootPath, "PROJECT"))
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer func() {
-		if err := file.Close(); err != nil {
-			log.Fatal(err)
-		}
-	}()
-
-	scanner := bufio.NewScanner(file)
-	for scanner.Scan() {
-		if strings.HasPrefix(scanner.Text(), fmt.Sprintf("%s:", fieldKey)) {
-			fieldInfo := strings.Split(scanner.Text(), ":")
-			if len(fieldInfo) != 2 {
-				log.Fatalf("Unexpected %s info: %s", fieldKey, scanner.Text())
-			}
-			fieldVal = strings.Replace(fieldInfo[1], " ", "", -1)
-			break
-		}
-	}
-	if len(fieldVal) == 0 {
-		log.Fatalf("%s/PROJECT file is missing value for '%s'", rootPath, fieldKey)
-	}
-
-	return fieldVal
-}
diff --git a/pkg/crd/v2/crd.go b/pkg/crd/v2/crd.go
deleted file mode 100644
index a18db6e64..000000000
--- a/pkg/crd/v2/crd.go
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
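To make the scope of this deletion concrete, the removed Generator was driven roughly as follows; a minimal sketch assembled from the deleted code and test above, with illustrative paths and PROJECT values:

package main

import (
	"log"

	"github.com/spf13/afero"
	crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator"
)

func main() {
	// RootPath must contain a PROJECT file with "key: value" lines such as
	//
	//   domain: example.com
	//   repo: github.com/example/project
	//
	// which GetDomainFromProject/GetRepoFromProject above fall back to when
	// Domain/Repo are not set explicitly.
	g := &crdgenerator.Generator{
		OutFs:     afero.NewOsFs(),
		RootPath:  "/path/to/project",             // illustrative
		OutputDir: "/path/to/project/config/crds", // matches the ValidateAndInitFields default
	}
	if err := g.ValidateAndInitFields(); err != nil {
		log.Fatal(err)
	}
	if err := g.Do(); err != nil {
		log.Fatal(err)
	}
}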
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v2
-
-import (
-	"fmt"
-	"log"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
-)
-
-// The methods in this file are mostly copied from the old controller-tools
-// generator's parsing logic. A lot of places are not consistent :(
-// This should be cleaned up.
-
-// parseCRDs populates the CRD field of each Group.Version.Resource,
-// creating validations using the annotations on type fields.
-func parseCRDs(comments []string) *v1beta1.CustomResourceDefinitionSpec {
-	if !IsAPIResource(comments) {
-		return nil
-	}
-
-	crdVersion := v1beta1.CustomResourceDefinitionVersion{
-		Name:    parseVersion(comments),
-		Served:  true,
-		Storage: isStorageVersion(comments),
-	}
-
-	crdSpec := &v1beta1.CustomResourceDefinitionSpec{
-		Group:    "",
-		Names:    v1beta1.CustomResourceDefinitionNames{},
-		Versions: []v1beta1.CustomResourceDefinitionVersion{crdVersion},
-	}
-
-	if IsNonNamespaced(comments) {
-		crdSpec.Scope = "Cluster"
-	} else {
-		crdSpec.Scope = "Namespaced"
-	}
-
-	if hasCategories(comments) {
-		categoriesTag := getCategoriesTag(comments)
-		categories := strings.Split(categoriesTag, ",")
-		crdSpec.Names.Categories = categories
-	}
-
-	if hasSingular(comments) {
-		singularName := getSingularName(comments)
-		crdSpec.Names.Singular = singularName
-	}
-
-	if hasStatusSubresource(comments) {
-		if crdSpec.Subresources == nil {
-			crdSpec.Subresources = &v1beta1.CustomResourceSubresources{}
-		}
-		crdSpec.Subresources.Status = &v1beta1.CustomResourceSubresourceStatus{}
-	}
-
-	if hasScaleSubresource(comments) {
-		if crdSpec.Subresources == nil {
-			crdSpec.Subresources = &v1beta1.CustomResourceSubresources{}
-		}
-		jsonPath, err := parseScaleParams(comments)
-		if err != nil {
-			log.Fatalf("failed parsing CRD, error: %v", err.Error())
-		}
-		crdSpec.Subresources.Scale = &v1beta1.CustomResourceSubresourceScale{
-			SpecReplicasPath:   jsonPath[specReplicasPath],
-			StatusReplicasPath: jsonPath[statusReplicasPath],
-		}
-		labelSelector, ok := jsonPath[labelSelectorPath]
-		if ok && labelSelector != "" {
-			crdSpec.Subresources.Scale.LabelSelectorPath = &labelSelector
-		}
-	}
-	if hasPrintColumn(comments) {
-		result, err := parsePrintColumnParams(comments)
-		if err != nil {
-			log.Fatalf("failed to parse printcolumn annotations, error: %v", err.Error())
-		}
-		crdSpec.Versions[0].AdditionalPrinterColumns = result
-	}
-
-	rt, err := parseResourceAnnotation(comments)
-	if err != nil {
-		log.Fatalf("failed to parse resource annotations, error: %v", err.Error())
-	}
-	crdSpec.Names.Plural = rt.Resource
-	if len(rt.ShortName) > 0 {
-		crdSpec.Names.ShortNames = strings.Split(rt.ShortName, ";")
-	}
-
-	return crdSpec
-}
-
-const (
-	specReplicasPath   = "specpath"
-	statusReplicasPath = "statuspath"
-	labelSelectorPath  = "selectorpath"
-	jsonPathError      = "invalid scale path. specpath, statuspath key-value pairs are required, only the selectorpath key-value is optional. For example: // +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath=.spec.Label"
-	printColumnName    = "name"
-	printColumnType    = "type"
-	printColumnDescr   = "description"
-	printColumnPath    = "JSONPath"
-	printColumnFormat  = "format"
-	printColumnPri     = "priority"
-	printColumnError   = "invalid printcolumn path. name, type, and JSONPath are required key-value pairs and the rest of the fields are optional.
For example: // +kubebuilder:printcolumn:name=abc,type=string,JSONPath=status" -) - -// IsAPIResource returns true if either of the two conditions become true: -// 1. t has a +resource/+kubebuilder:resource comment tag -// 2. t has TypeMeta and ObjectMeta in its member list. -func IsAPIResource(comments []string) bool { - for _, c := range comments { - if strings.Contains(c, "+resource") || strings.Contains(c, "+kubebuilder:resource") { - return true - } - } - - return false -} - -// IsNonNamespaced returns true if t has a +nonNamespaced comment tag -func IsNonNamespaced(comments []string) bool { - if !IsAPIResource(comments) { - return false - } - - for _, c := range comments { - if strings.Contains(c, "+genclient:nonNamespaced") { - return true - } - } - - return false -} - -// hasPrintColumn returns true if t has a +printcolumn or +kubebuilder:printcolumn annotation. -func hasPrintColumn(comments []string) bool { - for _, c := range comments { - if strings.Contains(c, "+printcolumn") || strings.Contains(c, "+kubebuilder:printcolumn") { - return true - } - } - return false -} - -// IsAPISubresource returns true if t has a +subresource-request comment tag -func IsAPISubresource(comments []string) bool { - for _, c := range comments { - if strings.Contains(c, "+subresource-request") { - return true - } - } - return false -} - -// HasSubresource returns true if t is an APIResource with one or more Subresources -func HasSubresource(comments []string) bool { - if !IsAPIResource(comments) { - return false - } - for _, c := range comments { - if strings.Contains(c, "subresource") { - return true - } - } - return false -} - -// hasStatusSubresource returns true if t is an APIResource annotated with -// +kubebuilder:subresource:status -func hasStatusSubresource(comments []string) bool { - if !IsAPIResource(comments) { - return false - } - for _, c := range comments { - if strings.Contains(c, "+kubebuilder:subresource:status") { - return true - } - } - return false -} - -// hasScaleSubresource returns true if t is an APIResource annotated with -// +kubebuilder:subresource:scale -func hasScaleSubresource(comments []string) bool { - if !IsAPIResource(comments) { - return false - } - for _, c := range comments { - if strings.Contains(c, "+kubebuilder:subresource:scale") { - return true - } - } - return false -} - -// hasCategories returns true if t is an APIResource annotated with -// +kubebuilder:categories -func hasCategories(comments []string) bool { - if !IsAPIResource(comments) { - return false - } - - for _, c := range comments { - if strings.Contains(c, "+kubebuilder:categories") { - return true - } - } - return false -} - -// HasDocAnnotation returns true if t is an APIResource with doc annotation -// +kubebuilder:doc -func HasDocAnnotation(comments []string) bool { - if !IsAPIResource(comments) { - return false - } - for _, c := range comments { - if strings.Contains(c, "+kubebuilder:doc") { - return true - } - } - return false -} - -// hasSingular returns true if t is an APIResource annotated with -// +kubebuilder:singular -func hasSingular(comments []string) bool { - if !IsAPIResource(comments) { - return false - } - for _, c := range comments { - if strings.Contains(c, "+kubebuilder:singular") { - return true - } - } - return false -} - -// resourceTags contains the tags present in a "+resource=" comment -type resourceTags struct { - Resource string - REST string - Strategy string - ShortName string -} - -// ParseKV parses key-value string formatted as "foo=bar" and returns key and value. 
-func ParseKV(s string) (key, value string, err error) {
-	kv := strings.Split(s, "=")
-	if len(kv) != 2 {
-		err = fmt.Errorf("invalid key value pair")
-		return key, value, err
-	}
-	key, value = kv[0], kv[1]
-	if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") {
-		value = value[1 : len(value)-1]
-	}
-	return key, value, err
-}
-
-// resourceAnnotationValue is a helper function to extract resource annotation.
-func resourceAnnotationValue(tag string) (resourceTags, error) {
-	res := resourceTags{}
-	for _, elem := range strings.Split(tag, ",") {
-		key, value, err := ParseKV(elem)
-		if err != nil {
-			return resourceTags{}, fmt.Errorf("// +kubebuilder:resource: tags must be key value pairs. Expected "+
-				"keys [path=] "+
-				"Got string: [%s]", tag)
-		}
-		switch key {
-		case "path":
-			res.Resource = value
-		case "shortName":
-			res.ShortName = value
-		default:
-			return resourceTags{}, fmt.Errorf("the given key %s is invalid", key)
-		}
-	}
-	return res, nil
-}
-
-// GetAnnotation extracts the annotation from comment text.
-// It will return "foo" for comment "+kubebuilder:webhook:foo".
-func GetAnnotation(c, name string) string {
-	prefix := fmt.Sprintf("+%s:", name)
-	if strings.HasPrefix(c, prefix) {
-		return strings.TrimPrefix(c, prefix)
-	}
-	return ""
-}
-
-// parseResourceAnnotation parses the tags in a "+resource=" comment into a resourceTags struct.
-func parseResourceAnnotation(comments []string) (resourceTags, error) {
-	finalResult := resourceTags{}
-	var resourceAnnotationFound bool
-	for _, comment := range comments {
-		anno := GetAnnotation(comment, "kubebuilder:resource")
-		if len(anno) == 0 {
-			continue
-		}
-		result, err := resourceAnnotationValue(anno)
-		if err != nil {
-			return resourceTags{}, err
-		}
-		if resourceAnnotationFound {
-			return resourceTags{}, fmt.Errorf("the resource annotation should only exist once per type")
-		}
-		resourceAnnotationFound = true
-		finalResult = result
-	}
-	return finalResult, nil
-}
-
-// GetVersion returns the version inferred from the base of the given directory.
-func GetVersion(pwd string) string {
-	return filepath.Base(pwd)
-}
-
-// Comments is a structure for using comment tags on go structs and fields
-type Comments []string
-
-// getTag returns the value for the first comment with a prefix matching "+name="
-// e.g. "+name=foo\n+name=bar" would return "foo"
-func (c Comments) getTag(name, sep string) string { // nolint: unparam
-	for _, c := range c {
-		prefix := fmt.Sprintf("+%s%s", name, sep)
-		if strings.HasPrefix(c, prefix) {
-			return strings.Replace(c, prefix, "", 1)
-		}
-	}
-	return ""
-}
-
-// hasTag returns true if the Comments has a tag with the given name
-func (c Comments) hasTag(name string) bool {
-	for _, c := range c {
-		prefix := fmt.Sprintf("+%s", name)
-		if strings.HasPrefix(c, prefix) {
-			return true
-		}
-	}
-	return false
-}
-
-// getTags returns the value for all comments with a prefix and separator. E.g. for "name" and "="
-// "+name=foo\n+name=bar" would return []string{"foo", "bar"}
-func (c Comments) getTags(name, sep string) []string {
-	tags := []string{}
-	for _, c := range c {
-		prefix := fmt.Sprintf("+%s%s", name, sep)
-		if strings.HasPrefix(c, prefix) {
-			tags = append(tags, strings.Replace(c, prefix, "", 1))
-		}
-	}
-	return tags
-}
-
-// getKVTags collects the raw key=value pairs from all comments with the given
-// prefix. E.g. for prefix "+name:" the comment "+name:foo=bar,baz=qux" would
-// return []string{"foo=bar", "baz=qux"}
-func (c Comments) getKVTags(prefix, sep string) []string { // nolint: unparam
-	tags := []string{}
-	for _, c := range c {
-		if strings.HasPrefix(c, prefix) {
-			rawKVs := strings.Replace(c, prefix, "", 1)
-			tags = append(tags, strings.Split(rawKVs, ",")...)
-		}
-	}
-	return tags
-}
-
-// getCategoriesTag returns the value of the +kubebuilder:categories tags
-func getCategoriesTag(comments []string) string {
-	cs := Comments(comments)
-	resource := cs.getTag("kubebuilder:categories", "=")
-	if len(resource) == 0 {
-		panic(fmt.Errorf("must specify +kubebuilder:categories comment"))
-	}
-	return resource
-}
-
-// getSingularName returns the value of the +kubebuilder:singular tag
-func getSingularName(comments []string) string {
-	cs := Comments(comments)
-	singular := cs.getTag("kubebuilder:singular", "=")
-	if len(singular) == 0 {
-		panic(fmt.Errorf("must specify a value to use with +kubebuilder:singular comment"))
-	}
-	return singular
-}
-
-// The scale subresource requires specpath, statuspath, and selectorpath key
-// values, which represent the JSON paths of SpecReplicasPath,
-// StatusReplicasPath, and LabelSelectorPath respectively. e.g.
-// +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath=
-func parseScaleParams(comments []string) (map[string]string, error) {
-	jsonPath := make(map[string]string)
-	for _, c := range comments {
-		if strings.Contains(c, "+kubebuilder:subresource:scale") {
-			paths := strings.Replace(c, "+kubebuilder:subresource:scale:", "", -1)
-			path := strings.Split(paths, ",")
-			if len(path) < 2 {
-				return nil, fmt.Errorf(jsonPathError)
-			}
-			for _, s := range path {
-				kv := strings.Split(s, "=")
-				if len(kv) != 2 {
-					return nil, fmt.Errorf(jsonPathError)
-				}
-				if kv[0] == specReplicasPath || kv[0] == statusReplicasPath || kv[0] == labelSelectorPath {
-					jsonPath[kv[0]] = kv[1]
-				} else {
-					return nil, fmt.Errorf(jsonPathError)
-				}
-			}
-			var ok bool
-			_, ok = jsonPath[specReplicasPath]
-			if !ok {
-				return nil, fmt.Errorf(jsonPathError)
-			}
-			_, ok = jsonPath[statusReplicasPath]
-			if !ok {
-				return nil, fmt.Errorf(jsonPathError)
-			}
-			return jsonPath, nil
-		}
-	}
-	return nil, fmt.Errorf(jsonPathError)
-}
-
-// printColumnKV parses a key-value string formatted as "foo=bar" and returns the key and value.
-func printColumnKV(s string) (key, value string, err error) {
-	kv := strings.SplitN(s, "=", 2)
-	if len(kv) != 2 {
-		err = fmt.Errorf("invalid key value pair")
-		return key, value, err
-	}
-	key, value = kv[0], kv[1]
-	if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") {
-		value = value[1 : len(value)-1]
-	}
-	return key, value, err
-}
-
-// helperPrintColumn is a helper function for parsePrintColumnParams to compute printer columns.
-// TODO: reduce the cyclomatic complexity and remove next line
-// nolint: gocyclo,goconst
-func helperPrintColumn(parts string) (v1beta1.CustomResourceColumnDefinition, error) {
-	config := v1beta1.CustomResourceColumnDefinition{}
-	var count int
-	part := strings.Split(parts, ",")
-	if len(part) < 3 {
-		return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError)
-	}
-
-	for _, elem := range strings.Split(parts, ",") {
-		key, value, err := printColumnKV(elem)
-		if err != nil {
-			return v1beta1.CustomResourceColumnDefinition{},
-				fmt.Errorf("//+kubebuilder:printcolumn: tags must be key value pairs. Expected "+
-					"keys [name=,type=,description=,format=] "+
-					"Got string: [%s]", parts)
-		}
-		if key == printColumnName || key == printColumnType || key == printColumnPath {
-			count++
-		}
-		switch key {
-		case printColumnName:
-			config.Name = value
-		case printColumnType:
-			if value == "integer" || value == "number" || value == "string" || value == "boolean" || value == "date" {
-				config.Type = value
-			} else {
-				return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnType)
-			}
-		case printColumnFormat:
-			if config.Type == "integer" && (value == "int32" || value == "int64") {
-				config.Format = value
-			} else if config.Type == "number" && (value == "float" || value == "double") {
-				config.Format = value
-			} else if config.Type == "string" && (value == "byte" || value == "date" || value == "date-time" || value == "password") {
-				config.Format = value
-			} else {
-				return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnFormat)
-			}
-		case printColumnPath:
-			config.JSONPath = value
-		case printColumnPri:
-			i, err := strconv.Atoi(value)
-			if err != nil {
-				return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnPri)
-			}
-			config.Priority = int32(i)
-		case printColumnDescr:
-			config.Description = value
-		default:
-			return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError)
-		}
-	}
-	if count != 3 {
-		return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError)
-	}
-	return config, nil
-}
-
-// printcolumn requires the name, type, and JSONPath fields; the rest of the fields are optional
-// +kubebuilder:printcolumn:name=,type=,description=,JSONPath=<.spec.Name>,priority=,format=
-func parsePrintColumnParams(comments []string) ([]v1beta1.CustomResourceColumnDefinition, error) {
-	result := []v1beta1.CustomResourceColumnDefinition{}
-	for _, comment := range comments {
-		if strings.Contains(comment, "+kubebuilder:printcolumn") {
-			parts := strings.Replace(comment, "+kubebuilder:printcolumn:", "", -1)
-			res, err := helperPrintColumn(parts)
-			if err != nil {
-				return []v1beta1.CustomResourceColumnDefinition{}, err
-			}
-			result = append(result, res)
-		}
-	}
-	return result, nil
-}
-
-func parseVersion(comments []string) string {
-	return Comments(comments).getTag("kubebuilder:crd:version", "=")
-}
-
-func isStorageVersion(comments []string) bool {
-	storage := strings.ToLower(Comments(comments).getTag("kubebuilder:crd:storage", "="))
-	if len(storage) > 0 {
-		switch storage {
-		case "true":
-			return true
-		case "false":
-			return false
-		default:
-			log.Fatalf("the value associated with kubebuilder:crd:storage should be either true or false")
-		}
-	}
-	return false
-}
diff --git a/pkg/crd/v2/definition_checker.go b/pkg/crd/v2/definition_checker.go
deleted file mode 100644
index 808fd0829..000000000
---
a/pkg/crd/v2/definition_checker.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "fmt" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" -) - -func checkDefinitions(defs v1beta1.JSONSchemaDefinitions, startingTypes map[string]bool) { - fmt.Printf("Type checking Starting expecting %d types\n", len(defs)) - pruner := DefinitionPruner{defs, startingTypes} - newDefs := pruner.Prune(false) - if len(defs) != len(newDefs) { - fmt.Printf("Type checking failed. Expected %d actual %d\n", len(defs), len(newDefs)) - } else { - fmt.Println("Type checking PASSED") - } -} diff --git a/pkg/crd/v2/definition_pruner.go b/pkg/crd/v2/definition_pruner.go deleted file mode 100644 index 6243d8ed5..000000000 --- a/pkg/crd/v2/definition_pruner.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "fmt" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" -) - -// DefinitionPruner prunes unwanted definitions -type DefinitionPruner struct { - definitions v1beta1.JSONSchemaDefinitions - startingTypes map[string]bool -} - -// Prune prunes the definitions -func (pruner *DefinitionPruner) Prune(ignoreUnknownTypes bool) map[string]bool { - visitedDefs := make(map[string]bool) - queue := make([]string, 0) - // Push starting types into queue - for typeName := range pruner.startingTypes { - queue = append(queue, typeName) - } - - // Perform BFS and keep track of visited types - for len(queue) > 0 { - curType := queue[0] - queue = queue[1:] - // If already visited, skip it - if _, exists := visitedDefs[curType]; exists { - continue - } - // If no definitions present, (probably an external reference) - // Skip it - if _, exists := pruner.definitions[curType]; !exists { - if ignoreUnknownTypes { - continue - } else { - fmt.Println("Unknown type ", curType) - panic("Unknown type") - } - } - visitedDefs[curType] = true - curDef := pruner.definitions[curType] - queue = append(queue, processDefinition(&curDef)...) - } - - return visitedDefs -} - -func processDefinition(def *v1beta1.JSONSchemaProps) []string { - allTypes := []string{} - if def == nil { - return allTypes - } - if def.Ref != nil && len(*def.Ref) > 0 { - allTypes = append(allTypes, getNameFromURL(*def.Ref)) - } - allTypes = append(allTypes, processDefinitionMap(def.Definitions)...) - allTypes = append(allTypes, processDefinitionMap(def.Properties)...) 
- allTypes = append(allTypes, processDefinitionArray(def.AllOf)...) - allTypes = append(allTypes, processDefinitionArray(def.AnyOf)...) - allTypes = append(allTypes, processDefinitionArray(def.OneOf)...) - if def.AdditionalItems != nil { - allTypes = append(allTypes, processDefinition(def.AdditionalItems.Schema)...) - } - if def.Items != nil { - allTypes = append(allTypes, processDefinition(def.Items.Schema)...) - } - allTypes = append(allTypes, processDefinition(def.Not)...) - return allTypes -} - -func processDefinitionMap(defMap v1beta1.JSONSchemaDefinitions) []string { - allTypes := []string{} - if defMap == nil { - return allTypes - } - for key := range defMap { - def := defMap[key] - allTypes = append(allTypes, processDefinition(&def)...) - } - return allTypes -} - -func processDefinitionArray(defArray []v1beta1.JSONSchemaProps) []string { - allTypes := []string{} - if defArray == nil { - return allTypes - } - for i := range defArray { - def := defArray[i] - allTypes = append(allTypes, processDefinition(&def)...) - } - return allTypes -} diff --git a/pkg/crd/v2/embed.go b/pkg/crd/v2/embed.go deleted file mode 100644 index 43ad3ea5a..000000000 --- a/pkg/crd/v2/embed.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "log" - "strings" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" -) - -func embedSchema(defs map[string]v1beta1.JSONSchemaProps, startingTypes map[string]bool) map[string]v1beta1.JSONSchemaProps { - newDefs := map[string]v1beta1.JSONSchemaProps{} - for name := range startingTypes { - def := defs[name] - embedDefinition(&def, defs) - newDefs[name] = def - } - return newDefs -} - -func embedDefinition(def *v1beta1.JSONSchemaProps, refs map[string]v1beta1.JSONSchemaProps) { - if def == nil { - return - } - - if def.Ref != nil && len(*def.Ref) > 0 { - refName := strings.TrimPrefix(*def.Ref, defPrefix) - ref, ok := refs[refName] - if !ok { - log.Panicf("can't find the definition of %q", refName) - } - def.Properties = ref.Properties - def.Required = ref.Required - def.Type = ref.Type - def.Ref = nil - } - - def.Definitions = embedDefinitionMap(def.Definitions, refs) - def.Properties = embedDefinitionMap(def.Properties, refs) - // TODO: decide if we want to do this. 
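// To make the embedding concrete (hypothetical names ToySpec and strPtr):
// given refs["ToySpec"] = {Type: "object", Properties: ..., Required: ...},
// embedDefinition rewrites
//   v1beta1.JSONSchemaProps{Ref: strPtr("#/definitions/ToySpec")}
// in place to carry those Properties, Required, and Type directly, clearing
// Ref so the emitted schema needs no $ref resolution. The TODO above asks
// whether AllOf members should get the same treatment via the commented-out
// call below; as written, only the flattener merges them.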
- //def.AllOf = embedDefinitionArray(def.AllOf, refs) - def.AnyOf = embedDefinitionArray(def.AnyOf, refs) - def.OneOf = embedDefinitionArray(def.OneOf, refs) - if def.AdditionalItems != nil { - embedDefinition(def.AdditionalItems.Schema, refs) - } - if def.Items != nil { - embedDefinition(def.Items.Schema, refs) - } - embedDefinition(def.Not, refs) -} - -func embedDefinitionMap(defs map[string]v1beta1.JSONSchemaProps, refs map[string]v1beta1.JSONSchemaProps) map[string]v1beta1.JSONSchemaProps { - newDefs := map[string]v1beta1.JSONSchemaProps{} - for i := range defs { - def := defs[i] - embedDefinition(&def, refs) - newDefs[i] = def - } - if len(newDefs) == 0 { - return nil - } - return newDefs -} - -func embedDefinitionArray(defs []v1beta1.JSONSchemaProps, refs map[string]v1beta1.JSONSchemaProps) []v1beta1.JSONSchemaProps { - newDefs := make([]v1beta1.JSONSchemaProps, len(defs)) - for i := range defs { - def := defs[i] - embedDefinition(&def, refs) - newDefs[i] = def - } - if len(newDefs) == 0 { - return nil - } - return newDefs -} diff --git a/pkg/crd/v2/flattener.go b/pkg/crd/v2/flattener.go deleted file mode 100644 index 493cd3e17..000000000 --- a/pkg/crd/v2/flattener.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" -) - -// Recursively flattens "allOf" tags. If there is cyclic -// dependency, execution is aborted. -func recursiveFlatten(defs v1beta1.JSONSchemaDefinitions, definition *v1beta1.JSONSchemaProps, defName string, visited *map[string]bool) *v1beta1.JSONSchemaProps { - if len(definition.AllOf) == 0 { - return definition - } - isAlreadyVisited := (*visited)[defName] - if isAlreadyVisited { - panic("Cycle detected in definitions") - } - (*visited)[defName] = true - - aggregatedDef := &v1beta1.JSONSchemaProps{ - Properties: definition.Properties, - Required: definition.Required, - Type: definition.Type, - } - for _, allOfDef := range definition.AllOf { - var newDef *v1beta1.JSONSchemaProps - if allOfDef.Ref != nil && len(*allOfDef.Ref) > 0 { - // If the definition has $ref url, fetch the referred resource - // after flattening it. - nameOfRef := getNameFromURL(*allOfDef.Ref) - def := defs[nameOfRef] - newDef = recursiveFlatten(defs, &def, nameOfRef, visited) - } else { - newDef = &allOfDef - } - mergeDefinitions(aggregatedDef, newDef) - } - - delete(*visited, defName) - return aggregatedDef -} - -// Merges the properties from the 'rhsDef' to the 'lhsDef'. -// Also transfers the description as well. -func mergeDefinitions(lhsDef *v1beta1.JSONSchemaProps, rhsDef *v1beta1.JSONSchemaProps) { - if lhsDef == nil || rhsDef == nil { - return - } - // At this point, both defs will not have any 'AnyOf' defs. - // 1. 
Add all the properties from rhsDef to lhsDef - if lhsDef.Properties == nil { - lhsDef.Properties = make(map[string]v1beta1.JSONSchemaProps) - } - for propKey := range rhsDef.Properties { - lhsDef.Properties[propKey] = rhsDef.Properties[propKey] - } - // 2. Transfer the description - lhsDef.Description = rhsDef.Description - // 3. Merge required fields - lhsDef.Required = append(lhsDef.Required, rhsDef.Required...) -} - -// Flattens the schema by inlining 'allOf' tags. -func flattenAllOf(defs v1beta1.JSONSchemaDefinitions) { - for nameOfDef := range defs { - visited := make(map[string]bool) - def := defs[nameOfDef] - defs[nameOfDef] = *recursiveFlatten(defs, &def, nameOfDef, &visited) - } -} diff --git a/pkg/crd/v2/generator.go b/pkg/crd/v2/generator.go deleted file mode 100644 index b43eecfeb..000000000 --- a/pkg/crd/v2/generator.go +++ /dev/null @@ -1,257 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "fmt" - "go/ast" - "strings" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" -) - -const ( - defPrefix = "#/definitions/" - inlineTag = "inline" -) - -// Checks whether the typeName represents a simple json type - -// Removes a character by replacing it with a space -func removeChar(str string, removedStr string) string { - return strings.Replace(str, removedStr, " ", -1) -} - -// This is a hacky function that does the one job of -// extracting the tag values in the structs -// Example struct: -// type MyType struct { -// MyField string `yaml:"myField,omitempty"` -// } -// -// From the above example struct, we need to extract -// and return this: ("myField", "omitempty") -func extractFromTag(tag *ast.BasicLit) (string, string) { - if tag == nil || tag.Value == "" { - return "", "" - } - tagValue := tag.Value - - tagValue = removeChar(tagValue, "`") - tagValue = removeChar(tagValue, `"`) - tagValue = strings.TrimSpace(tagValue) - - var tagContent, tagKey string - fmt.Sscanf(tagValue, `%s %s`, &tagKey, &tagContent) - - if tagKey != "json:" && tagKey != "yaml:" { - return "", "" - } - - if strings.Contains(tagContent, ",") { - splitContent := strings.Split(tagContent, ",") - return splitContent[0], splitContent[1] - } - return tagContent, "" -} - -// exprToSchema converts ast.Expr to JSONSchemaProps -func (f *file) exprToSchema(t ast.Expr, doc string, comments []*ast.CommentGroup) (*v1beta1.JSONSchemaProps, []TypeReference) { - var def *v1beta1.JSONSchemaProps - var externalTypeRefs []TypeReference - - switch tt := t.(type) { - case *ast.Ident: - def = f.identToSchema(tt, comments) - case *ast.ArrayType: - def, externalTypeRefs = f.arrayTypeToSchema(tt, doc, comments) - case *ast.MapType: - def = f.mapTypeToSchema(tt, doc, comments) - case *ast.SelectorExpr: - def, externalTypeRefs = f.selectorExprToSchema(tt, comments) - case *ast.StarExpr: - def, externalTypeRefs = f.exprToSchema(tt.X, "", comments) - case *ast.StructType: - def, externalTypeRefs = f.structTypeToSchema(tt) - case *ast.InterfaceType: // TODO: 
handle interface if necessary. - return &v1beta1.JSONSchemaProps{}, []TypeReference{} - } - def.Description = filterDescription(doc) - - return def, externalTypeRefs -} - -// identToSchema converts ast.Ident to JSONSchemaProps. -func (f *file) identToSchema(ident *ast.Ident, comments []*ast.CommentGroup) *v1beta1.JSONSchemaProps { - def := &v1beta1.JSONSchemaProps{} - if isSimpleType(ident.Name) { - def.Type = jsonifyType(ident.Name) - } else { - def.Ref = getPrefixedDefLink(ident.Name, f.pkgPrefix) - } - processMarkersInComments(def, comments...) - return def -} - -// identToSchema converts ast.SelectorExpr to JSONSchemaProps. -func (f *file) selectorExprToSchema(selectorType *ast.SelectorExpr, comments []*ast.CommentGroup) (*v1beta1.JSONSchemaProps, []TypeReference) { - pkgAlias := selectorType.X.(*ast.Ident).Name - typeName := selectorType.Sel.Name - - typ := TypeReference{ - TypeName: typeName, - PackageName: f.importPaths[pkgAlias], - } - - time := TypeReference{TypeName: "Time", PackageName: "k8s.io/apimachinery/pkg/apis/meta/v1"} - duration := TypeReference{TypeName: "Duration", PackageName: "k8s.io/apimachinery/pkg/apis/meta/v1"} - quantity := TypeReference{TypeName: "Quantity", PackageName: "k8s.io/apimachinery/pkg/api/resource"} - unstructured := TypeReference{TypeName: "Unstructured", PackageName: "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"} - rawExtension := TypeReference{TypeName: "RawExtension", PackageName: "k8s.io/apimachinery/pkg/runtime"} - intOrString := TypeReference{TypeName: "IntOrString", PackageName: "k8s.io/apimachinery/pkg/util/intstr"} - - switch typ { - case time: - return &v1beta1.JSONSchemaProps{ - Type: "string", - Format: "date-time", - }, []TypeReference{} - case duration: - return &v1beta1.JSONSchemaProps{ - Type: "string", - }, []TypeReference{} - case quantity: - return &v1beta1.JSONSchemaProps{ - Type: "string", - }, []TypeReference{} - case unstructured, rawExtension: - return &v1beta1.JSONSchemaProps{ - Type: "object", - }, []TypeReference{} - case intOrString: - return &v1beta1.JSONSchemaProps{ - AnyOf: []v1beta1.JSONSchemaProps{ - { - Type: "string", - }, - { - Type: "integer", - }, - }, - }, []TypeReference{} - } - - def := &v1beta1.JSONSchemaProps{ - Ref: getPrefixedDefLink(typeName, f.importPaths[pkgAlias]), - } - processMarkersInComments(def, comments...) - return def, []TypeReference{{TypeName: typeName, PackageName: pkgAlias}} -} - -// arrayTypeToSchema converts ast.ArrayType to JSONSchemaProps by examining the elements in the array. -func (f *file) arrayTypeToSchema(arrayType *ast.ArrayType, doc string, comments []*ast.CommentGroup) (*v1beta1.JSONSchemaProps, []TypeReference) { - // not passing doc down to exprToSchema - items, extRefs := f.exprToSchema(arrayType.Elt, "", comments) - processMarkersInComments(items, comments...) - - def := &v1beta1.JSONSchemaProps{ - Type: "array", - Items: &v1beta1.JSONSchemaPropsOrArray{Schema: items}, - Description: doc, - } - - // TODO: clear the schema on the parent level, since it is on the children level. - - return def, extRefs -} - -// mapTypeToSchema converts ast.MapType to JSONSchemaProps. 
-func (f *file) mapTypeToSchema(mapType *ast.MapType, doc string, comments []*ast.CommentGroup) *v1beta1.JSONSchemaProps { - def := &v1beta1.JSONSchemaProps{} - switch mapType.Value.(type) { - case *ast.Ident: - valueType := mapType.Value.(*ast.Ident) - if def.AdditionalProperties == nil { - def.AdditionalProperties = &v1beta1.JSONSchemaPropsOrBool{} - } - def.AdditionalProperties.Schema = new(v1beta1.JSONSchemaProps) - - if isSimpleType(valueType.Name) { - def.AdditionalProperties.Schema.Type = valueType.Name - } else { - def.AdditionalProperties.Schema.Ref = getPrefixedDefLink(valueType.Name, f.pkgPrefix) - } - case *ast.InterfaceType: - // No op - panic("Map Interface Type") - } - def.Type = "object" - def.Description = doc - processMarkersInComments(def, comments...) - return def -} - -// structTypeToSchema converts ast.StructType to JSONSchemaProps by examining each field in the struct. -func (f *file) structTypeToSchema(structType *ast.StructType) (*v1beta1.JSONSchemaProps, []TypeReference) { - def := &v1beta1.JSONSchemaProps{ - Type: "object", - } - externalTypeRefs := []TypeReference{} - for _, field := range structType.Fields.List { - yamlName, option := extractFromTag(field.Tag) - - if (yamlName == "" && option != inlineTag) || yamlName == "-" { - continue - } - - if option != inlineTag && option != "omitempty" { - def.Required = append(def.Required, yamlName) - } - - if def.Properties == nil { - def.Properties = make(map[string]v1beta1.JSONSchemaProps) - } - - propDef, propExternalTypeDefs := f.exprToSchema(field.Type, field.Doc.Text(), f.commentMap[field]) - - externalTypeRefs = append(externalTypeRefs, propExternalTypeDefs...) - - if option == inlineTag { - def.AllOf = append(def.AllOf, *propDef) - continue - } - - def.Properties[yamlName] = *propDef - } - - return def, externalTypeRefs -} - -func getReachableTypes(startingTypes map[string]bool, definitions v1beta1.JSONSchemaDefinitions) map[string]bool { - pruner := DefinitionPruner{definitions, startingTypes} - prunedTypes := pruner.Prune(true) - return prunedTypes -} - -type file struct { - // name prefix of the package - pkgPrefix string - // importPaths contains a map from import alias to the import path for the file. - importPaths map[string]string - // commentMap is comment mapping for this file. - commentMap ast.CommentMap -} diff --git a/pkg/crd/v2/multiversion.go b/pkg/crd/v2/multiversion.go deleted file mode 100644 index e6153eec4..000000000 --- a/pkg/crd/v2/multiversion.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
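Stepping back, the field handling in structTypeToSchema above can be summarized with a small example (an illustrative type; all names hypothetical):

type ToySpec struct {
	Replicas int32       `json:"replicas"`       // no omitempty: added to Required
	Name     string      `json:"name,omitempty"` // optional property
	Hidden   string      `json:"-"`              // skipped entirely
	Extra    ExtraFields `json:",inline"`        // schema appended to AllOf
}

type ExtraFields struct{}

Replicas lands in the schema's required list, Hidden never appears, and Extra's schema goes into AllOf for the flattener (or embedder) to merge later.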
-*/ - -package v2 - -import ( - "go/build" - "io/ioutil" - "log" - "path/filepath" - - "github.com/spf13/afero" -) - -type listDirsFn func(pkgPath string) (dirs []string, e error) - -var defaultListDirs = func(path string) ([]string, error) { - pkg, err := build.Import(path, "", 0) - if err != nil { - return nil, err - } - infos, err := ioutil.ReadDir(pkg.Dir) - if err != nil { - return nil, err - } - var dirs []string - for _, info := range infos { - if info.IsDir() { - dirs = append(dirs, info.Name()) - } - } - return dirs, nil -} - -type MultiVersionOptions struct { - // InputPackage is the path of the input package that contains source files. - InputPackage string - // Types is a list of target types. - Types []string - - // fs is provided FS. We can use afero.NewMemFs() for testing. - fs afero.Fs - listDirsFn listDirsFn - listFilesFn listFilesFn -} - -type MultiVersionGenerator struct { - MultiVersionOptions - WriterOptions -} - -func (op *MultiVersionGenerator) Generate() { - if len(op.InputPackage) == 0 || len(op.OutputPath) == 0 { - log.Panic("Both input path and output paths need to be set") - } - - if op.fs == nil { - op.fs = afero.NewOsFs() - } - if op.listDirsFn == nil { - op.listDirsFn = defaultListDirs - } - - op.crdSpecs = op.parse() - - op.write(true, op.Types) -} - -func (op *MultiVersionOptions) parse() crdSpecByKind { - startingPointMap := make(map[string]bool) - for i := range op.Types { - startingPointMap[op.Types[i]] = true - } - - dirs, err := op.listDirsFn(op.InputPackage) - if err != nil { - panic(err) - } - - crdSpecs := crdSpecByKind{} - for _, dir := range dirs { - singleVer := SingleVersionOptions{ - Types: op.Types, - InputPackage: filepath.Join(op.InputPackage, dir), - Flatten: false, - fs: op.fs, - } - if op.listFilesFn != nil { - singleVer.listFilesFn = op.listFilesFn - } - _, crdSingleVersionSpecs := singleVer.parse() - // merge crd versions - err = mergeCRDVersions(crdSpecs, crdSingleVersionSpecs) - if err != nil { - panic(err) - } - } - - return crdSpecs -} diff --git a/pkg/crd/v2/multiversion_test.go b/pkg/crd/v2/multiversion_test.go deleted file mode 100644 index d55a86239..000000000 --- a/pkg/crd/v2/multiversion_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package v2 - -import ( - "reflect" - "testing" - - "github.com/ghodss/yaml" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type multiVersionTestcase struct { - inputPackage string - types []string - - listFilesFn listFilesFn - listDirsFn listDirsFn - // map of path to file content. 
- inputFiles map[string][]byte - - expectedCrdSpecs map[schema.GroupKind][]byte -} - -func TestMultiVerGenerate(t *testing.T) { - testcases := []multiVersionTestcase{ - { - inputPackage: "github.com/myorg/myapi", - types: []string{"Toy"}, - listDirsFn: func(pkgPath string) (strings []string, e error) { - return []string{"v1", "v1alpha1"}, nil - }, - listFilesFn: func(pkgPath string) (s string, strings []string, e error) { - return pkgPath, []string{"types.go"}, nil - }, - inputFiles: map[string][]byte{ - "github.com/myorg/myapi/v1/types.go": []byte(` -package v1 - -// +groupName=foo.bar.com -// +versionName=v1 - -// +kubebuilder:resource:path=toys,shortName=to;ty -// +kubebuilder:singular=toy - -// Toy is a toy struct -type Toy struct { - // +kubebuilder:validation:Maximum=90 - // +kubebuilder:validation:Minimum=1 - - // Replicas is a number - Replicas int32 ` + "`" + `json:"replicas"` + "`" + ` -} -`), - "github.com/myorg/myapi/v1alpha1/types.go": []byte(` -package v1alpha1 - -// +groupName=foo.bar.com -// +versionName=v1alpha1 - -// +kubebuilder:resource:path=toys,shortName=to;ty -// +kubebuilder:singular=toy - -// Toy is a toy struct -type Toy struct { - // +kubebuilder:validation:MaxLength=15 - // +kubebuilder:validation:MinLength=1 - - // Name is a string - Name string ` + "`" + `json:"name,omitempty"` + "`" + ` - - // +kubebuilder:validation:Maximum=100 - // +kubebuilder:validation:Minimum=1 - - // Replicas is a number - Replicas int32 ` + "`" + `json:"replicas"` + "`" + ` -} -`), - }, - expectedCrdSpecs: map[schema.GroupKind][]byte{ - schema.GroupKind{Group: "foo.bar.com", Kind: "Toy"}: []byte(`group: foo.bar.com -names: - kind: Toy - plural: toys - shortNames: - - to - - ty - singular: toy -scope: Namespaced -versions: -- name: v1 - schema: - openAPIV3Schema: - description: Toy is a toy struct - properties: - replicas: - description: Replicas is a number - maximum: 90 - minimum: 1 - type: integer - required: - - replicas - type: object - served: true - storage: false -- name: v1alpha1 - schema: - openAPIV3Schema: - description: Toy is a toy struct - properties: - name: - description: Name is a string - maxLength: 15 - minLength: 1 - type: string - replicas: - description: Replicas is a number - maximum: 100 - minimum: 1 - type: integer - required: - - replicas - type: object - served: true - storage: false -`), - }, - }, - } - for _, tc := range testcases { - fs, err := prepareTestFs(tc.inputFiles) - if err != nil { - t.Errorf("unable to prepare the in-memory fs for testing: %v", err) - continue - } - - op := &MultiVersionOptions{ - InputPackage: tc.inputPackage, - Types: tc.types, - listDirsFn: tc.listDirsFn, - listFilesFn: tc.listFilesFn, - fs: fs, - } - - crdSpecs := op.parse() - - if len(tc.expectedCrdSpecs) > 0 { - expectedSpecsByKind := map[schema.GroupKind]*v1beta1.CustomResourceDefinitionSpec{} - for gk := range tc.expectedCrdSpecs { - var spec v1beta1.CustomResourceDefinitionSpec - err = yaml.Unmarshal(tc.expectedCrdSpecs[gk], &spec) - if err != nil { - t.Errorf("unable to unmarshal the expected crd spec: %v", err) - continue - } - expectedSpecsByKind[gk] = &spec - } - - if !reflect.DeepEqual(crdSpecs, crdSpecByKind(expectedSpecsByKind)) { - t.Errorf("expected:\n%+v,\nbut got:\n%+v\n", expectedSpecsByKind, crdSpecs) - continue - } - } - } -} diff --git a/pkg/crd/v2/parser.go b/pkg/crd/v2/parser.go deleted file mode 100644 index 3654464f5..000000000 --- a/pkg/crd/v2/parser.go +++ /dev/null @@ -1,288 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
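For reference, the multi-version flow exercised by the test above was driven end to end roughly like this (a sketch from the deleted types; the only WriterOptions field visible in this series is OutputPath, so that is the only one assumed):

gen := &MultiVersionGenerator{
	MultiVersionOptions: MultiVersionOptions{
		// parent package whose subdirectories (v1, v1alpha1, ...) are
		// discovered via listDirsFn and parsed one version at a time
		InputPackage: "github.com/myorg/myapi",
		Types:        []string{"Toy"},
	},
	WriterOptions: WriterOptions{OutputPath: "config/crds"}, // assumed field
}
// parses each version directory, merges the per-version CRD specs via
// mergeCRDVersions, and writes the result
gen.Generate()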
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "fmt" - "go/ast" - "go/parser" - "go/token" - "log" - "path/filepath" - "strings" - - "github.com/spf13/afero" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func (pr *prsr) parseTypesInFile(filePath string, curPkgPrefix string, skipCRD bool) ( - v1beta1.JSONSchemaDefinitions, ExternalReferences, crdSpecByKind) { - // Open the input go file and parse the Abstract Syntax Tree - fset := token.NewFileSet() - srcFile, err := pr.fs.Open(filePath) - if err != nil { - log.Fatal(err) - } - node, err := parser.ParseFile(fset, filePath, srcFile, parser.ParseComments) - if err != nil { - log.Fatal(err) - } - - definitions := make(v1beta1.JSONSchemaDefinitions) - externalRefs := make(ExternalReferences) - - // Parse import statements to get "alias: pkgName" mapping - importPaths := make(map[string]string) - for _, importItem := range node.Imports { - pathValue := strings.Trim(importItem.Path.Value, "\"") - if importItem.Name != nil { - // Process aliased import - importPaths[importItem.Name.Name] = pathValue - } else if strings.Contains(pathValue, "/") { - // Process unnamed imports with "/" - segments := strings.Split(pathValue, "/") - importPaths[segments[len(segments)-1]] = pathValue - } else { - importPaths[pathValue] = pathValue - } - } - - // Create an ast.CommentMap from the ast.File's comments. - // This helps keeping the association between comments and AST nodes. - // TODO: if necessary, support our own rules of comments ownership, golang's - // builtin rules are listed at https://golang.org/pkg/go/ast/#NewCommentMap. - // It seems it can meet our need at the moment. - cmap := ast.NewCommentMap(fset, node, node.Comments) - - f := &file{ - pkgPrefix: curPkgPrefix, - importPaths: importPaths, - commentMap: cmap, - } - - crdSpecs := crdSpecByKind{} - for i := range node.Decls { - declaration, ok := node.Decls[i].(*ast.GenDecl) - if !ok { - continue - } - - // Skip it if it's not type declaration. - if declaration.Tok != token.TYPE { - continue - } - - // We support the following format - // // TreeNode doc - // type TreeNode struct { - // left, right *TreeNode - // value *Comparable - // } - // but not - // type ( - // // Point doc - // Point struct{ x, y float64 } - // // Point2 doc - // Point2 struct{ x, y int } - // ) - // since the latter format is rarely used in k8s. 
- if len(declaration.Specs) != 1 { - continue - } - ts := declaration.Specs[0] - typeSpec, ok := ts.(*ast.TypeSpec) - if !ok { - fmt.Printf("spec type is: %T\n", ts) - continue - } - - typeName := typeSpec.Name.Name - typeDescription := declaration.Doc.Text() - - fmt.Println("Generating schema definition for type:", typeName) - def, refTypes := f.exprToSchema(typeSpec.Type, typeDescription, []*ast.CommentGroup{}) - definitions[getFullName(typeName, curPkgPrefix)] = *def - externalRefs[getFullName(typeName, curPkgPrefix)] = refTypes - - var comments []string - for _, c := range f.commentMap[node.Decls[i]] { - comments = append(comments, strings.Split(c.Text(), "\n")...) - } - - if !skipCRD { - crdSpec := parseCRDs(comments) - if crdSpec != nil { - crdSpec.Names.Kind = typeName - gk := schema.GroupKind{Kind: typeName} - crdSpecs[gk] = crdSpec - // TODO: validate the CRD spec for one version. - } - } - } - - if !skipCRD { - // process top-level (not tied to a struct field) markers. - // e.g. group name marker +groupName= - pr.processTopLevelMarkers(node.Comments) - } - - // Overwrite import aliases with actual package names - for typeName := range externalRefs { - for i, ref := range externalRefs[typeName] { - externalRefs[typeName][i].PackageName = importPaths[ref.PackageName] - } - } - - return definitions, externalRefs, crdSpecs -} - -// processTopLevelMarkers process top-level (not tied to a struct field) markers. -// e.g. group name marker +groupName= -func (pr *prsr) processTopLevelMarkers(comments []*ast.CommentGroup) { - for _, c := range comments { - commentLines := strings.Split(c.Text(), "\n") - cs := Comments(commentLines) - if cs.hasTag("groupName") { - group := cs.getTag("groupName", "=") - if len(group) == 0 { - log.Fatalf("can't use an empty name for the +groupName marker") - } - if pr.generatorOptions != nil && len(pr.generatorOptions.group) > 0 && group != pr.generatorOptions.group { - log.Fatalf("can't have different group names %q and %q one package", pr.generatorOptions.group, group) - } - if pr.generatorOptions == nil { - pr.generatorOptions = &pkglevelGeneratorOptions{group: group} - } else { - pr.generatorOptions.group = group - } - } - - if cs.hasTag("versionName") { - version := cs.getTag("versionName", "=") - if len(version) == 0 { - log.Fatalf("can't use an empty name for the +versionName marker") - } - if pr.generatorOptions != nil && len(pr.generatorOptions.version) > 0 && version != pr.generatorOptions.version { - log.Fatalf("can't have different version names %q and %q one package", pr.generatorOptions.version, version) - } - if pr.generatorOptions == nil { - pr.generatorOptions = &pkglevelGeneratorOptions{version: version} - } else { - pr.generatorOptions.version = version - } - } - } -} - -func (pr *prsr) parseTypesInPackage(pkgName string, referencedTypes map[string]bool, rootPackage, skipCRD bool) ( - v1beta1.JSONSchemaDefinitions, crdSpecByKind) { - pkgDefs := make(v1beta1.JSONSchemaDefinitions) - pkgExternalTypes := make(ExternalReferences) - pkgCRDSpecs := make(crdSpecByKind) - - pkgDir, listOfFiles, err := pr.listFilesFn(pkgName) - if err != nil { - log.Fatal(err) - } - - pkgPrefix := strings.Replace(pkgName, "/", ".", -1) - if rootPackage { - pkgPrefix = "" - } - fmt.Println("pkgPrefix=", pkgPrefix) - for _, fileName := range listOfFiles { - fmt.Println("Processing file ", fileName) - fileDefs, fileExternalRefs, fileCRDSpecs := pr.parseTypesInFile(filepath.Join(pkgDir, fileName), pkgPrefix, skipCRD) - mergeDefs(pkgDefs, fileDefs) - 
mergeExternalRefs(pkgExternalTypes, fileExternalRefs) - mergeCRDSpecs(pkgCRDSpecs, fileCRDSpecs) - } - - // Add pkg prefix to referencedTypes - newReferencedTypes := make(map[string]bool) - for key := range referencedTypes { - altKey := getFullName(key, pkgPrefix) - newReferencedTypes[altKey] = referencedTypes[key] - } - referencedTypes = newReferencedTypes - - fmt.Println("referencedTypes") - debugPrint(referencedTypes) - - allReachableTypes := getReachableTypes(referencedTypes, pkgDefs) - for key := range pkgDefs { - if _, exists := allReachableTypes[key]; !exists { - delete(pkgDefs, key) - delete(pkgExternalTypes, key) - } - } - fmt.Println("allReachableTypes") - debugPrint(allReachableTypes) - fmt.Println("pkgDefs") - debugPrint(pkgDefs) - fmt.Println("pkgExternalTypes") - debugPrint(pkgExternalTypes) - - uniquePkgTypeRefs := make(map[string]map[string]bool) - for _, item := range pkgExternalTypes { - for _, typeRef := range item { - if _, ok := uniquePkgTypeRefs[typeRef.PackageName]; !ok { - uniquePkgTypeRefs[typeRef.PackageName] = make(map[string]bool) - } - uniquePkgTypeRefs[typeRef.PackageName][typeRef.TypeName] = true - } - } - - for childPkgName := range uniquePkgTypeRefs { - childTypes := uniquePkgTypeRefs[childPkgName] - childPkgPr := prsr{fs: pr.fs} - childDefs, _ := childPkgPr.parseTypesInPackage(childPkgName, childTypes, false, true) - mergeDefs(pkgDefs, childDefs) - } - - return pkgDefs, pr.fanoutPkgLevelOptions(pkgCRDSpecs) -} - -type pkglevelGeneratorOptions struct { - group string - version string -} - -type prsr struct { - generatorOptions *pkglevelGeneratorOptions - - listFilesFn listFilesFn - fs afero.Fs -} - -func (pr *prsr) fanoutPkgLevelOptions(crdSpecs crdSpecByKind) crdSpecByKind { - rtCRDSpecs := crdSpecByKind{} - for gk := range crdSpecs { - if pr.generatorOptions != nil { - crdSpecs[gk].Group = pr.generatorOptions.group - if len(crdSpecs[gk].Versions) == 1 { - crdSpecs[gk].Versions[0].Name = pr.generatorOptions.version - } - rtCRDSpecs[schema.GroupKind{Group: pr.generatorOptions.group, Kind: gk.Kind}] = crdSpecs[gk] - } else { - rtCRDSpecs[gk] = crdSpecs[gk] - } - } - return rtCRDSpecs -} diff --git a/pkg/crd/v2/singleversion.go b/pkg/crd/v2/singleversion.go deleted file mode 100644 index e55c7f72f..000000000 --- a/pkg/crd/v2/singleversion.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "go/build" - "log" - - "github.com/spf13/afero" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" -) - -type listFilesFn func(pkgPath string) (dir string, files []string, e error) - -var defaultListFiles = func(pkgPath string) (string, []string, error) { - pkg, err := build.Import(pkgPath, "", 0) - return pkg.Dir, pkg.GoFiles, err -} - -type SingleVersionOptions struct { - // InputPackage is the path of the input package that contains source files. - InputPackage string - // Types is a list of target types. 
-	Types []string
-	// Flatten indicates whether we use a flattened structure or an embedded structure.
-	Flatten bool
-
-	// fs is the provided FS. We can use afero.NewMemMapFs() for testing.
-	fs          afero.Fs
-	listFilesFn listFilesFn
-}
-
-type SingleVersionGenerator struct {
-	SingleVersionOptions
-	WriterOptions
-
-	outputCRD bool
-}
-
-func (op *SingleVersionOptions) parse() (v1beta1.JSONSchemaDefinitions, crdSpecByKind) {
-	startingPointMap := make(map[string]bool)
-	for i := range op.Types {
-		startingPointMap[op.Types[i]] = true
-	}
-	pr := prsr{
-		listFilesFn: op.listFilesFn,
-		fs:          op.fs,
-	}
-	defs, crdSpecs := pr.parseTypesInPackage(op.InputPackage, startingPointMap, true, false)
-
-	// flattenAllOf only flattens allOf tags
-	flattenAllOf(defs)
-
-	reachableTypes := getReachableTypes(startingPointMap, defs)
-	for key := range defs {
-		if _, exists := reachableTypes[key]; !exists {
-			delete(defs, key)
-		}
-	}
-
-	checkDefinitions(defs, startingPointMap)
-
-	if !op.Flatten {
-		defs = embedSchema(defs, startingPointMap)
-
-		newDefs := v1beta1.JSONSchemaDefinitions{}
-		for name := range startingPointMap {
-			newDefs[name] = defs[name]
-		}
-		defs = newDefs
-	}
-
-	pr.linkCRDSpec(defs, crdSpecs)
-	return defs, crdSpecs
-}
-
-func (op *SingleVersionGenerator) Generate() {
-	if len(op.InputPackage) == 0 || len(op.OutputPath) == 0 {
-		log.Panic("Both input and output paths need to be set")
-	}
-
-	if op.fs == nil {
-		op.fs = afero.NewOsFs()
-	}
-	if op.listFilesFn == nil {
-		op.listFilesFn = defaultListFiles
-	}
-
-	if op.outputCRD {
-		// if generating CRD, we should always embed schemas.
-		op.Flatten = false
-	}
-
-	op.defs, op.crdSpecs = op.parse()
-
-	op.write(op.outputCRD, op.Types)
-}
-
-func (pr *prsr) linkCRDSpec(defs v1beta1.JSONSchemaDefinitions, crdSpecs crdSpecByKind) {
-	for gk := range crdSpecs {
-		if len(crdSpecs[gk].Versions) == 0 {
-			log.Printf("no version for CRD %q", gk)
-			continue
-		}
-		if len(crdSpecs[gk].Versions) > 1 {
-			log.Fatalf("the number of versions in one package should not be more than 1")
-		}
-		def, ok := defs[gk.Kind]
-		if !ok {
-			log.Printf("can't get JSON schema for %q", gk)
-			continue
-		}
-		crdSpecs[gk].Versions[0].Schema = &v1beta1.CustomResourceValidation{
-			OpenAPIV3Schema: &def,
-		}
-	}
-}
diff --git a/pkg/crd/v2/singleversion_test.go b/pkg/crd/v2/singleversion_test.go
deleted file mode 100644
index d9255434f..000000000
--- a/pkg/crd/v2/singleversion_test.go
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v2
-
-import (
-	"reflect"
-	"testing"
-
-	"github.com/ghodss/yaml"
-	"github.com/spf13/afero"
-
-	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-type singleVersionTestcase struct {
-	inputPackage string
-	types        []string
-	flatten      bool
-
-	listFilesFn listFilesFn
-	// map of path to file content.
- inputFiles map[string][]byte - - expectedDefs []byte - expectedCrdSpecs map[schema.GroupKind][]byte -} - -func TestSingleVerGenerate(t *testing.T) { - testcases := []singleVersionTestcase{ - { - inputPackage: "github.com/myorg/myapi", - types: []string{"Toy"}, - flatten: false, - listFilesFn: func(pkgPath string) (s string, strings []string, e error) { - return "github.com/myorg/myapi", []string{"types.go"}, nil - }, - inputFiles: map[string][]byte{ - "github.com/myorg/myapi/types.go": []byte(` -package myapi - -// Toy is a toy struct -type Toy struct { - // Replicas is a number - Replicas int32 ` + "`" + `json:"replicas"` + "`" + ` -} -`), - }, - expectedDefs: []byte(`Toy: - description: Toy is a toy struct - properties: - replicas: - description: Replicas is a number - type: integer - required: - - replicas - type: object -`), - }, - { - inputPackage: "github.com/myorg/myapi", - types: []string{"Toy"}, - flatten: false, - listFilesFn: func(pkgPath string) (s string, strings []string, e error) { - return "github.com/myorg/myapi", []string{"types.go"}, nil - }, - inputFiles: map[string][]byte{ - "github.com/myorg/myapi/types.go": []byte(` -package myapi - -// +groupName=foo.bar.com -// +versionName=v1 - -// +kubebuilder:resource:path=toys,shortName=to;ty -// +kubebuilder:singular=toy - -// Toy is a toy struct -type Toy struct { - // +kubebuilder:validation:Maximum=90 - // +kubebuilder:validation:Minimum=1 - - // Replicas is a number - Replicas int32 ` + "`" + `json:"replicas"` + "`" + ` -} -`), - }, - expectedDefs: []byte(`Toy: - description: Toy is a toy struct - properties: - replicas: - description: Replicas is a number - maximum: 90 - minimum: 1 - type: integer - required: - - replicas - type: object -`), - expectedCrdSpecs: map[schema.GroupKind][]byte{ - schema.GroupKind{Group: "foo.bar.com", Kind: "Toy"}: []byte(`group: foo.bar.com -names: - kind: Toy - plural: toys - shortNames: - - to - - ty - singular: toy -scope: Namespaced -versions: -- name: v1 - schema: - openAPIV3Schema: - description: Toy is a toy struct - properties: - replicas: - description: Replicas is a number - maximum: 90 - minimum: 1 - type: integer - required: - - replicas - type: object - served: true - storage: false -`), - }, - }, - } - for _, tc := range testcases { - fs, err := prepareTestFs(tc.inputFiles) - if err != nil { - t.Errorf("unable to prepare the in-memory fs for testing: %v", err) - continue - } - - op := &SingleVersionOptions{ - InputPackage: tc.inputPackage, - Types: tc.types, - Flatten: tc.flatten, - listFilesFn: tc.listFilesFn, - fs: fs, - } - - defs, crdSpecs := op.parse() - - if len(tc.expectedDefs) > 0 { - var expectedDefs v1beta1.JSONSchemaDefinitions - err = yaml.Unmarshal(tc.expectedDefs, &expectedDefs) - if err != nil { - t.Errorf("unable to unmarshal the expected definitions: %v", err) - continue - } - if !reflect.DeepEqual(defs, expectedDefs) { - defsYaml, err := yaml.Marshal(defs) - if err != nil { - t.Errorf("unable to marshal the actual definitions: %v", err) - } - t.Errorf("expected: %s, but got: %s", tc.expectedDefs, defsYaml) - continue - } - } - if len(tc.expectedCrdSpecs) > 0 { - expectedSpecsByKind := map[schema.GroupKind]*v1beta1.CustomResourceDefinitionSpec{} - for gk := range tc.expectedCrdSpecs { - var spec v1beta1.CustomResourceDefinitionSpec - err = yaml.Unmarshal(tc.expectedCrdSpecs[gk], &spec) - if err != nil { - t.Errorf("unable to unmarshal the expected crd spec: %v", err) - continue - } - expectedSpecsByKind[gk] = &spec - } - - if !reflect.DeepEqual(crdSpecs, 
crdSpecByKind(expectedSpecsByKind)) { - t.Errorf("expected:\n%+v,\nbut got:\n%+v\n", expectedSpecsByKind, crdSpecs) - continue - } - } - } -} - -func prepareTestFs(files map[string][]byte) (afero.Fs, error) { - fs := afero.NewMemMapFs() - for filename, content := range files { - if err := afero.WriteFile(fs, filename, content, 0666); err != nil { - return nil, err - } - } - return fs, nil -} diff --git a/pkg/crd/v2/types.go b/pkg/crd/v2/types.go deleted file mode 100644 index de14335e2..000000000 --- a/pkg/crd/v2/types.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type crdSpecByKind map[schema.GroupKind]*v1beta1.CustomResourceDefinitionSpec - -// TypeReference denotes the (typeName, packageName) tuple -type TypeReference struct { - TypeName string - PackageName string -} - -// ExternalReferences map contains list of "type: packageName" entries -type ExternalReferences map[string][]TypeReference diff --git a/pkg/crd/v2/utils.go b/pkg/crd/v2/utils.go deleted file mode 100644 index 09be38ee6..000000000 --- a/pkg/crd/v2/utils.go +++ /dev/null @@ -1,194 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package v2
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "strings"
-
- "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
-)
-
-const (
- stringType = "string"
- intType = "int"
- int32Type = "int32"
- int64Type = "int64"
- boolType = "bool"
- byteType = "byte"
- float32Type = "float32"
- float64Type = "float64"
-
- stringJSONType = "string"
- integerJSONType = "integer"
- booleanJSONType = "boolean"
- floatJSONType = "float"
-)
-
-func isSimpleType(typeName string) bool {
- return typeName == stringType || typeName == intType ||
- typeName == int32Type || typeName == int64Type ||
- typeName == boolType || typeName == byteType ||
- typeName == float32Type || typeName == float64Type
-}
-
-// jsonifyType converts the given simple type name to the corresponding JSON type.
-func jsonifyType(typeName string) string {
- switch typeName {
- case stringType:
- return stringJSONType
- case boolType:
- return booleanJSONType
- case intType, int32Type, int64Type:
- return integerJSONType
- case float32Type, float64Type:
- return floatJSONType
- case byteType:
- return stringJSONType
- }
- fmt.Println("jsonifyType called with a complex type ", typeName)
- panic("jsonifyType called with a complex type")
-}
-
-func mergeDefs(lhs v1beta1.JSONSchemaDefinitions, rhs v1beta1.JSONSchemaDefinitions) {
- if lhs == nil || rhs == nil {
- return
- }
- for key := range rhs {
- _, ok := lhs[key]
- if ok {
- // TODO: change this to use a logger
- fmt.Println("JSONSchemaProps ", key, " already present")
- continue
- }
- lhs[key] = rhs[key]
- }
-}
-
-func mergeExternalRefs(lhs ExternalReferences, rhs ExternalReferences) {
- if lhs == nil || rhs == nil {
- return
- }
- for key := range rhs {
- _, ok := lhs[key]
- if !ok {
- lhs[key] = rhs[key]
- } else {
- lhs[key] = append(lhs[key], rhs[key]...)
- }
- }
-}
-
-func mergeCRDSpecs(lhs, rhs crdSpecByKind) {
- if lhs == nil || rhs == nil {
- return
- }
- for key := range rhs {
- _, ok := lhs[key]
- if ok {
- // TODO: change this to use a logger
- fmt.Printf("CRD spec for kind %q already present\n", key)
- continue
- }
- lhs[key] = rhs[key]
- }
-}
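The merge helpers above share first-writer-wins semantics: entries already present on the left-hand side are kept, and duplicate keys are only reported, never overwritten. A minimal standalone sketch of that policy, using plain string maps instead of the v1beta1 types (illustrative only, not part of the package):

package main

import "fmt"

// mergeFirstWins copies entries from src into dst, keeping existing
// dst entries and reporting duplicate keys instead of overwriting.
func mergeFirstWins(dst, src map[string]string) {
	for key, val := range src {
		if _, ok := dst[key]; ok {
			fmt.Printf("key %q already present, keeping existing value\n", key)
			continue
		}
		dst[key] = val
	}
}

func main() {
	dst := map[string]string{"Toy": "toys.foo.bar.com"}
	mergeFirstWins(dst, map[string]string{"Toy": "other", "Pet": "pets.foo.bar.com"})
	fmt.Println(dst) // Toy keeps its original value; Pet is added
}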
-
-func mergeCRDVersions(lhs, rhs crdSpecByKind) error {
- if lhs == nil || rhs == nil {
- return nil
- }
- for gk := range rhs {
- _, ok := lhs[gk]
- if !ok {
- lhs[gk] = rhs[gk]
- continue
- }
-
- if len(lhs[gk].Group) == 0 {
- lhs[gk].Group = rhs[gk].Group
- } else if lhs[gk].Group != rhs[gk].Group {
- return fmt.Errorf("group names %q and %q from different packages must match", lhs[gk].Group, rhs[gk].Group)
- }
-
- if len(lhs[gk].Scope) == 0 {
- lhs[gk].Scope = rhs[gk].Scope
- } else if lhs[gk].Scope != rhs[gk].Scope {
- return fmt.Errorf("scopes %q and %q from different packages must match", lhs[gk].Scope, rhs[gk].Scope)
- }
-
- if len(lhs[gk].Names.Kind) == 0 {
- lhs[gk].Names.Kind = rhs[gk].Names.Kind
- } else if lhs[gk].Names.Kind != rhs[gk].Names.Kind {
- return fmt.Errorf("kind names %q and %q from different packages must match", lhs[gk].Names.Kind, rhs[gk].Names.Kind)
- }
- if len(lhs[gk].Names.Plural) == 0 {
- lhs[gk].Names.Plural = rhs[gk].Names.Plural
- } else if lhs[gk].Names.Plural != rhs[gk].Names.Plural {
- return fmt.Errorf("plural resource names %q and %q from different packages must match", lhs[gk].Names.Plural, rhs[gk].Names.Plural)
- }
- if len(lhs[gk].Names.Singular) == 0 {
- lhs[gk].Names.Singular = rhs[gk].Names.Singular
- } else if lhs[gk].Names.Singular != rhs[gk].Names.Singular {
- return fmt.Errorf("singular resource names %q and %q from different packages must match", lhs[gk].Names.Singular, rhs[gk].Names.Singular)
- }
- if len(lhs[gk].Names.ShortNames) == 0 {
- lhs[gk].Names.ShortNames = rhs[gk].Names.ShortNames
- } else if !reflect.DeepEqual(lhs[gk].Names.ShortNames, rhs[gk].Names.ShortNames) {
- return fmt.Errorf("short names %s and %s from different packages must match", lhs[gk].Names.ShortNames, rhs[gk].Names.ShortNames)
- }
-
- lhs[gk].Versions = append(lhs[gk].Versions, rhs[gk].Versions...)
- }
- return nil
-}
-
-func debugPrint(obj interface{}) {
- b, err := json.Marshal(obj)
- if err != nil {
- panic(err)
- }
- fmt.Println(string(b))
-}
-
-// Gets the schema definition link of a resource
-func getDefLink(resourceName string) *string {
- ret := defPrefix + resourceName
- return &ret
-}
-
-func getFullName(resourceName string, prefix string) string {
- if prefix == "" {
- return resourceName
- }
- prefix = strings.Replace(prefix, "/", ".", -1)
- return prefix + "." + resourceName
-}
-
-func getPrefixedDefLink(resourceName string, prefix string) *string {
- ret := defPrefix + getFullName(resourceName, prefix)
- return &ret
-}
-
-// Gets the resource name from a definitions URL.
-// E.g., returns 'TypeName' from '#/definitions/TypeName'
-func getNameFromURL(url string) string {
- slice := strings.Split(url, "/")
- return slice[len(slice)-1]
-}
diff --git a/pkg/crd/v2/validation.go b/pkg/crd/v2/validation.go
deleted file mode 100644
index 8049fa5a3..000000000
--- a/pkg/crd/v2/validation.go
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v2
-
-import (
- "fmt"
- "go/ast"
- "log"
- "strconv"
- "strings"
-
- "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
-)
-
-// filterDescription parses the comments above each field in the type definition.
-func filterDescription(res string) string {
- var temp strings.Builder
- var desc string
- for _, comment := range strings.Split(res, "\n") {
- comment = strings.Trim(comment, " ")
- if !(strings.Contains(comment, "+kubebuilder") || strings.HasPrefix(comment, "+")) {
- temp.WriteString(comment)
- temp.WriteString(" ")
- desc = strings.TrimRight(temp.String(), " ")
- }
- }
- return desc
-}
-
-func processMarkersInComments(def *v1beta1.JSONSchemaProps, commentGroups ...*ast.CommentGroup) {
- for _, commentGroup := range commentGroups {
- for _, comment := range strings.Split(commentGroup.Text(), "\n") {
- getValidation(comment, def)
- }
- }
-}
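processMarkersInComments feeds every comment line through getValidation below, whose contract is a single +kubebuilder:validation:Key=Value pair per line. A standalone sketch of just the splitting step (splitValidationMarker is a hypothetical helper, shown only to illustrate the accepted format):

package main

import (
	"fmt"
	"strings"
)

// splitValidationMarker extracts the key/value pair from a
// "+kubebuilder:validation:Key=Value" comment line, mirroring the
// parsing getValidation performs below.
func splitValidationMarker(comment string) (key, value string, ok bool) {
	comment = strings.TrimLeft(comment, " ")
	const prefix = "+kubebuilder:validation:"
	if !strings.HasPrefix(comment, prefix) {
		return "", "", false
	}
	parts := strings.Split(strings.TrimPrefix(comment, prefix), "=")
	if len(parts) != 2 {
		return "", "", false
	}
	return parts[0], parts[1], true
}

func main() {
	fmt.Println(splitValidationMarker(" +kubebuilder:validation:Maximum=90")) // Maximum 90 true
}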
-
-// This method is ported from controller-tools; it can be removed when things are moved back.
-// getValidation parses the validation tags from the comment and sets the
-// validation rules on the given JSONSchemaProps.
-// TODO: reduce the cyclomatic complexity and remove the next line
-// nolint: gocyclo
-func getValidation(comment string, props *v1beta1.JSONSchemaProps) {
- const arrayType = "array"
- comment = strings.TrimLeft(comment, " ")
- if !strings.HasPrefix(comment, "+kubebuilder:validation:") {
- return
- }
- c := strings.Replace(comment, "+kubebuilder:validation:", "", -1)
- parts := strings.Split(c, "=")
- if len(parts) != 2 {
- log.Fatalf("Expected +kubebuilder:validation:<key>=<value> actual: %s", comment)
- return
- }
- switch parts[0] {
- case "Maximum":
- f, err := strconv.ParseFloat(parts[1], 64)
- if err != nil {
- log.Fatalf("Could not parse float from %s: %v", comment, err)
- return
- }
- props.Maximum = &f
- case "ExclusiveMaximum":
- b, err := strconv.ParseBool(parts[1])
- if err != nil {
- log.Fatalf("Could not parse bool from %s: %v", comment, err)
- return
- }
- props.ExclusiveMaximum = b
- case "Minimum":
- f, err := strconv.ParseFloat(parts[1], 64)
- if err != nil {
- log.Fatalf("Could not parse float from %s: %v", comment, err)
- return
- }
- props.Minimum = &f
- case "ExclusiveMinimum":
- b, err := strconv.ParseBool(parts[1])
- if err != nil {
- log.Fatalf("Could not parse bool from %s: %v", comment, err)
- return
- }
- props.ExclusiveMinimum = b
- case "MaxLength":
- i, err := strconv.Atoi(parts[1])
- if err != nil {
- log.Fatalf("Could not parse int from %s: %v", comment, err)
- return
- }
- v := int64(i)
- props.MaxLength = &v
- case "MinLength":
- i, err := strconv.Atoi(parts[1])
- if err != nil {
- log.Fatalf("Could not parse int from %s: %v", comment, err)
- return
- }
- v := int64(i)
- props.MinLength = &v
- case "Pattern":
- props.Pattern = parts[1]
- case "MaxItems":
- if props.Type == arrayType {
- i, err := strconv.Atoi(parts[1])
- if err != nil {
- log.Fatalf("Could not parse int from %s: %v", comment, err)
- return
- }
- v := int64(i)
- props.MaxItems = &v
- }
- case "MinItems":
- if props.Type == arrayType {
- i, err := strconv.Atoi(parts[1])
- if err != nil {
- log.Fatalf("Could not parse int from %s: %v", comment, err)
- return
- }
- v := int64(i)
- props.MinItems = &v
- }
- case "UniqueItems":
- if props.Type == arrayType {
- b, err := strconv.ParseBool(parts[1])
- if err != nil {
- log.Fatalf("Could not parse bool from %s: %v", comment, err)
- return
- }
- props.UniqueItems = b
- }
- case "MultipleOf":
- f, err := strconv.ParseFloat(parts[1], 64)
- if err != nil {
- log.Fatalf("Could not parse float from %s: %v", comment, err)
- return
- }
- props.MultipleOf = &f
- case "Enum":
- if props.Type != arrayType {
- value := strings.Split(parts[1], ",")
- enums := []v1beta1.JSON{}
- for _, s := range value {
- checkType(props, s, &enums)
- }
- props.Enum = enums
- }
- case "Format":
- props.Format = parts[1]
- default:
- log.Fatalf("Unsupported validation: %s", comment)
- }
-}
-
-// checkType checks that the type of each enum element value matches the type of the field.
-func checkType(props *v1beta1.JSONSchemaProps, s string, enums *[]v1beta1.JSON) {
- switch props.Type {
- case "integer":
- if _, err := strconv.ParseInt(s, 0, 64); err != nil {
- log.Fatalf("Invalid integer value [%v] for a field of integer type", s)
- }
- *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))})
- case "float", "number":
- if _, err := strconv.ParseFloat(s, 64); err != nil {
- log.Fatalf("Invalid float value [%v] for a field of float type", s)
- }
- *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))})
- case "string":
- *enums = append(*enums, v1beta1.JSON{Raw: []byte(`"` + s + `"`)})
- }
-}
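checkType above encodes each enum candidate in the raw form expected by v1beta1.JSON: numeric values keep their literal bytes, strings get quoted. A standalone sketch of that encoding rule (rawEnumValue is a hypothetical name, not part of this package):

package main

import (
	"fmt"
	"strconv"
)

// rawEnumValue renders an enum candidate the way checkType does:
// numeric field types keep the literal bytes, strings are quoted.
func rawEnumValue(fieldType, s string) ([]byte, error) {
	switch fieldType {
	case "integer":
		if _, err := strconv.ParseInt(s, 0, 64); err != nil {
			return nil, fmt.Errorf("invalid integer value %q: %v", s, err)
		}
		return []byte(s), nil
	case "float", "number":
		if _, err := strconv.ParseFloat(s, 64); err != nil {
			return nil, fmt.Errorf("invalid float value %q: %v", s, err)
		}
		return []byte(s), nil
	default:
		// strings (and anything else) are quoted, mirroring the string case above
		return []byte(strconv.Quote(s)), nil
	}
}

func main() {
	raw, _ := rawEnumValue("integer", "42")
	fmt.Println(string(raw)) // 42
	raw, _ = rawEnumValue("string", "Always")
	fmt.Println(string(raw)) // "Always"
}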
diff --git a/pkg/crd/v2/writer.go b/pkg/crd/v2/writer.go
deleted file mode 100644
index ed572d621..000000000
--- a/pkg/crd/v2/writer.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v2
-
-import (
- "encoding/json"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/ghodss/yaml"
-
- "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-type WriterOptions struct {
- // OutputPath is the path that the schema will be written to.
- OutputPath string
- // OutputFormat should be either json or yaml. Defaults to json.
- OutputFormat string
-
- defs v1beta1.JSONSchemaDefinitions
- crdSpecs crdSpecByKind
-}
-
-func (op *WriterOptions) write(outputCRD bool, types []string) {
- var toSerializeList []interface{}
- if outputCRD {
- for gk, spec := range op.crdSpecs {
- crd := &v1beta1.CustomResourceDefinition{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "apiextensions.k8s.io/v1beta1",
- Kind: "CustomResourceDefinition",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: strings.ToLower(gk.Kind),
- Labels: map[string]string{"controller-tools.k8s.io": "1.0"},
- },
- Spec: *spec,
- }
- toSerializeList = append(toSerializeList, crd)
- }
- } else {
- schema := v1beta1.JSONSchemaProps{Definitions: op.defs}
- schema.Type = "object"
- schema.AnyOf = []v1beta1.JSONSchemaProps{}
- for _, typeName := range types {
- schema.AnyOf = append(schema.AnyOf, v1beta1.JSONSchemaProps{Ref: getDefLink(typeName)})
- }
- toSerializeList = []interface{}{schema}
- }
-
- dir := filepath.Dir(op.OutputPath)
- err := os.MkdirAll(dir, 0755)
- if err != nil {
- log.Fatal(err)
- }
- out, err := os.Create(op.OutputPath)
- if err != nil {
- log.Fatal(err)
- }
-
- for i := range toSerializeList {
- switch strings.ToLower(op.OutputFormat) {
- // default to json
- case "json", "":
- enc := json.NewEncoder(out)
- enc.SetIndent("", " ")
- err = enc.Encode(toSerializeList[i])
- if err2 := out.Close(); err == nil {
- err = err2
- }
- if err != nil {
- log.Panic(err)
- }
- case "yaml":
- m, err := yaml.Marshal(toSerializeList[i])
- if err != nil {
- log.Panic(err)
- }
- err = ioutil.WriteFile(op.OutputPath, m, 0644)
- if err != nil {
- log.Panic(err)
- }
- }
- }
-}
diff --git a/pkg/internal/codegen/parse/apis.go b/pkg/internal/codegen/parse/apis.go
deleted file mode 100644
index c953b4b3b..000000000
--- a/pkg/internal/codegen/parse/apis.go
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "fmt" - "path" - "path/filepath" - "strings" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/gengo/types" - "sigs.k8s.io/controller-tools/pkg/internal/codegen" -) - -type genUnversionedType struct { - Type *types.Type - Resource *codegen.APIResource -} - -func (b *APIs) parseAPIs() { - apis := &codegen.APIs{ - Domain: b.Domain, - Package: b.APIsPkg, - Groups: map[string]*codegen.APIGroup{}, - Rules: b.Rules, - Informers: b.Informers, - } - - for group, versionMap := range b.ByGroupVersionKind { - apiGroup := &codegen.APIGroup{ - Group: group, - GroupTitle: strings.Title(group), - Domain: b.Domain, - Versions: map[string]*codegen.APIVersion{}, - UnversionedResources: map[string]*codegen.APIResource{}, - } - - for version, kindMap := range versionMap { - apiVersion := &codegen.APIVersion{ - Domain: b.Domain, - Group: group, - Version: version, - Resources: map[string]*codegen.APIResource{}, - } - for kind, resource := range kindMap { - apiResource := &codegen.APIResource{ - Domain: resource.Domain, - Version: resource.Version, - Group: resource.Group, - Resource: resource.Resource, - Type: resource.Type, - REST: resource.REST, - Kind: resource.Kind, - Subresources: resource.Subresources, - StatusStrategy: resource.StatusStrategy, - Strategy: resource.Strategy, - NonNamespaced: resource.NonNamespaced, - ShortName: resource.ShortName, - } - parseDoc(resource, apiResource) - apiVersion.Resources[kind] = apiResource - // Set the package for the api version - apiVersion.Pkg = b.context.Universe[resource.Type.Name.Package] - // Set the package for the api group - apiGroup.Pkg = b.context.Universe[filepath.Dir(resource.Type.Name.Package)] - if apiGroup.Pkg != nil { - apiGroup.PkgPath = apiGroup.Pkg.Path - } - - apiGroup.UnversionedResources[kind] = apiResource - } - - apiGroup.Versions[version] = apiVersion - } - b.parseStructs(apiGroup) - apis.Groups[group] = apiGroup - } - apis.Pkg = b.context.Universe[b.APIsPkg] - b.APIs = apis -} - -func (b *APIs) parseStructs(apigroup *codegen.APIGroup) { - remaining := []genUnversionedType{} - for _, version := range apigroup.Versions { - for _, resource := range version.Resources { - remaining = append(remaining, genUnversionedType{resource.Type, resource}) - } - } - for _, version := range b.SubByGroupVersionKind[apigroup.Group] { - for _, kind := range version { - remaining = append(remaining, genUnversionedType{kind, nil}) - } - } - - done := sets.String{} - for len(remaining) > 0 { - // Pop the next element from the list - next := remaining[0] - remaining[0] = remaining[len(remaining)-1] - remaining = remaining[:len(remaining)-1] - - // Already processed this type. 
Skip it - if done.Has(next.Type.Name.Name) { - continue - } - done.Insert(next.Type.Name.Name) - - // Generate the struct and append to the list - result, additionalTypes := parseType(next.Type) - - // This is a resource, so generate the client - if b.genClient(next.Type) { - result.GenClient = true - result.GenDeepCopy = true - } - - if next.Resource != nil { - result.NonNamespaced = IsNonNamespaced(next.Type) - } - - if b.genDeepCopy(next.Type) { - result.GenDeepCopy = true - } - apigroup.Structs = append(apigroup.Structs, result) - - // Add the newly discovered subtypes - for _, at := range additionalTypes { - remaining = append(remaining, genUnversionedType{at, nil}) - } - } -} - -// parseType parses the type into a Struct, and returns a list of types that -// need to be parsed -func parseType(t *types.Type) (*codegen.Struct, []*types.Type) { - remaining := []*types.Type{} - - s := &codegen.Struct{ - Name: t.Name.Name, - GenClient: false, - GenUnversioned: true, // Generate unversioned structs by default - } - - for _, c := range t.CommentLines { - if strings.Contains(c, "+genregister:unversioned=false") { - // Don't generate the unversioned struct - s.GenUnversioned = false - } - } - - for _, member := range t.Members { - uType := member.Type.Name.Name - memberName := member.Name - uImport := "" - - // Use the element type for Pointers, Maps and Slices - mSubType := member.Type - hasElem := false - for mSubType.Elem != nil { - mSubType = mSubType.Elem - hasElem = true - } - if hasElem { - // Strip the package from the field type - uType = strings.Replace(member.Type.String(), mSubType.Name.Package+".", "", 1) - } - - base := filepath.Base(member.Type.String()) - samepkg := t.Name.Package == mSubType.Name.Package - - // If not in the same package, calculate the import pkg - if !samepkg { - parts := strings.Split(base, ".") - if len(parts) > 1 { - // Don't generate unversioned types for core types, just use the versioned types - if strings.HasPrefix(mSubType.Name.Package, "k8s.io/api/") { - // Import the package under an alias so it doesn't conflict with other groups - // having the same version - importAlias := path.Base(path.Dir(mSubType.Name.Package)) + path.Base(mSubType.Name.Package) - uImport = fmt.Sprintf("%s \"%s\"", importAlias, mSubType.Name.Package) - if hasElem { - // Replace the full package with the alias when referring to the type - uType = strings.Replace(member.Type.String(), mSubType.Name.Package, importAlias, 1) - } else { - // Replace the full package with the alias when referring to the type - uType = fmt.Sprintf("%s.%s", importAlias, parts[1]) - } - } else { - switch member.Type.Name.Package { - case "k8s.io/apimachinery/pkg/apis/meta/v1": - // Use versioned types for meta/v1 - uImport = fmt.Sprintf("%s \"%s\"", "metav1", "k8s.io/apimachinery/pkg/apis/meta/v1") - uType = "metav1." + parts[1] - default: - // Use unversioned types for everything else - t := member.Type - - if t.Elem != nil { - // handle Pointers, Maps, Slices - - // We need to parse the package from the Type String - t = t.Elem - str := member.Type.String() - startPkg := strings.LastIndexAny(str, "*]") - endPkg := strings.LastIndexAny(str, ".") - pkg := str[startPkg+1 : endPkg] - name := str[endPkg+1:] - prefix := str[:startPkg+1] - - uImportBase := path.Base(pkg) - uImportName := path.Base(path.Dir(pkg)) + uImportBase - uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg) - - uType = prefix + uImportName + "." 
+ name - } else { - // handle non- Pointer, Maps, Slices - pkg := t.Name.Package - name := t.Name.Name - - // Come up with the alias the package is imported under - // Concatenate with directory package to reduce naming collisions - uImportBase := path.Base(pkg) - uImportName := path.Base(path.Dir(pkg)) + uImportBase - - // Create the import statement - uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg) - - // Create the field type name - should be . - uType = uImportName + "." + name - } - } - } - } - } - - if member.Embedded { - memberName = "" - } - - s.Fields = append(s.Fields, &codegen.Field{ - Name: memberName, - VersionedPackage: member.Type.Name.Package, - UnversionedImport: uImport, - UnversionedType: uType, - }) - - // Add this member Type for processing if it isn't a primitive and - // is part of the same API group - if !mSubType.IsPrimitive() && GetGroup(mSubType) == GetGroup(t) { - remaining = append(remaining, mSubType) - } - } - return s, remaining -} - -func (b *APIs) genClient(c *types.Type) bool { - comments := Comments(c.CommentLines) - resource := comments.getTag("resource", ":") + comments.getTag("kubebuilder:resource", ":") - return len(resource) > 0 -} - -func (b *APIs) genDeepCopy(c *types.Type) bool { - comments := Comments(c.CommentLines) - return comments.hasTag("subresource-request") -} - -func parseDoc(resource, apiResource *codegen.APIResource) { - if HasDocAnnotation(resource.Type) { - resource.DocAnnotation = getDocAnnotation(resource.Type, "warning", "note") - apiResource.DocAnnotation = resource.DocAnnotation - } -} diff --git a/pkg/internal/codegen/parse/context.go b/pkg/internal/codegen/parse/context.go deleted file mode 100644 index 98493540f..000000000 --- a/pkg/internal/codegen/parse/context.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/parser" -) - -// NewContext returns a new Context from the builder -func NewContext(p *parser.Builder) (*generator.Context, error) { - return generator.NewContext(p, NameSystems(), DefaultNameSystem()) -} - -// DefaultNameSystem returns public by default. -func DefaultNameSystem() string { - return "public" -} - -// NameSystems returns the name system used by the generators in this package. -// e.g. black-magic -func NameSystems() namer.NameSystems { - return namer.NameSystems{ - "public": namer.NewPublicNamer(1), - "raw": namer.NewRawNamer("", nil), - } -} diff --git a/pkg/internal/codegen/parse/crd.go b/pkg/internal/codegen/parse/crd.go deleted file mode 100644 index a03f6bb82..000000000 --- a/pkg/internal/codegen/parse/crd.go +++ /dev/null @@ -1,639 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "regexp" - "strconv" - "strings" - "text/template" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/gengo/types" -) - -// parseCRDs populates the CRD field of each Group.Version.Resource, -// creating validations using the annotations on type fields. -func (b *APIs) parseCRDs() { - for _, group := range b.APIs.Groups { - for _, version := range group.Versions { - for _, resource := range version.Resources { - if IsAPIResource(resource.Type) { - resource.JSONSchemaProps, resource.Validation = - b.typeToJSONSchemaProps(resource.Type, sets.NewString(), []string{}, true) - - // Note: Drop the Type field at the root level of validation - // schema. Refer to following issue for details. - // https://github.com/kubernetes/kubernetes/issues/65293 - resource.JSONSchemaProps.Type = "" - j, err := json.MarshalIndent(resource.JSONSchemaProps, "", " ") - if err != nil { - log.Fatalf("Could not Marshall validation %v\n", err) - } - resource.ValidationComments = string(j) - - resource.CRD = v1beta1.CustomResourceDefinition{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "apiextensions.k8s.io/v1beta1", - Kind: "CustomResourceDefinition", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s.%s.%s", resource.Resource, resource.Group, resource.Domain), - Labels: map[string]string{"controller-tools.k8s.io": "1.0"}, - }, - Spec: v1beta1.CustomResourceDefinitionSpec{ - Group: fmt.Sprintf("%s.%s", resource.Group, resource.Domain), - Version: resource.Version, - Names: v1beta1.CustomResourceDefinitionNames{ - Kind: resource.Kind, - Plural: resource.Resource, - }, - Validation: &v1beta1.CustomResourceValidation{ - OpenAPIV3Schema: &resource.JSONSchemaProps, - }, - }, - } - if resource.NonNamespaced { - resource.CRD.Spec.Scope = "Cluster" - } else { - resource.CRD.Spec.Scope = "Namespaced" - } - - if hasCategories(resource.Type) { - categoriesTag := getCategoriesTag(resource.Type) - categories := strings.Split(categoriesTag, ",") - resource.CRD.Spec.Names.Categories = categories - resource.Categories = categories - } - - if hasSingular(resource.Type) { - singularName := getSingularName(resource.Type) - resource.CRD.Spec.Names.Singular = singularName - } - - if hasStatusSubresource(resource.Type) { - if resource.CRD.Spec.Subresources == nil { - resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{} - } - resource.CRD.Spec.Subresources.Status = &v1beta1.CustomResourceSubresourceStatus{} - } - - resource.CRD.Status.Conditions = []v1beta1.CustomResourceDefinitionCondition{} - resource.CRD.Status.StoredVersions = []string{} - - if hasScaleSubresource(resource.Type) { - if resource.CRD.Spec.Subresources == nil { - resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{} - } - jsonPath, err := parseScaleParams(resource.Type) - if err != nil { - log.Fatalf("failed in parsing CRD, error: %v", err.Error()) - } - resource.CRD.Spec.Subresources.Scale = &v1beta1.CustomResourceSubresourceScale{ - 
SpecReplicasPath: jsonPath[specReplicasPath],
- StatusReplicasPath: jsonPath[statusReplicasPath],
- }
- labelSelector, ok := jsonPath[labelSelectorPath]
- if ok && labelSelector != "" {
- resource.CRD.Spec.Subresources.Scale.LabelSelectorPath = &labelSelector
- }
- }
- if hasPrintColumn(resource.Type) {
- result, err := parsePrintColumnParams(resource.Type)
- if err != nil {
- log.Fatalf("failed to parse printcolumn annotations, error: %v", err.Error())
- }
- resource.CRD.Spec.AdditionalPrinterColumns = result
- }
- if len(resource.ShortName) > 0 {
- resource.CRD.Spec.Names.ShortNames = strings.Split(resource.ShortName, ";")
- }
- }
- }
- }
-}
-
-func (b *APIs) getTime() string {
- return `v1beta1.JSONSchemaProps{
- Type: "string",
- Format: "date-time",
-}`
-}
-
-func (b *APIs) getDuration() string {
- return `v1beta1.JSONSchemaProps{
- Type: "string",
-}`
-}
-
-func (b *APIs) getQuantity() string {
- return `v1beta1.JSONSchemaProps{
- Type: "string",
-}`
-}
-
-func (b *APIs) objSchema() string {
- return `v1beta1.JSONSchemaProps{
- Type: "object",
-}`
-}
-
-// typeToJSONSchemaProps returns a JSONSchemaProps object and its serialization
-// in Go that describe the JSONSchema validations for the given type.
-func (b *APIs) typeToJSONSchemaProps(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) {
- // Special cases
- time := types.Name{Name: "Time", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"}
- duration := types.Name{Name: "Duration", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"}
- quantity := types.Name{Name: "Quantity", Package: "k8s.io/apimachinery/pkg/api/resource"}
- meta := types.Name{Name: "ObjectMeta", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"}
- unstructured := types.Name{Name: "Unstructured", Package: "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"}
- rawExtension := types.Name{Name: "RawExtension", Package: "k8s.io/apimachinery/pkg/runtime"}
- intOrString := types.Name{Name: "IntOrString", Package: "k8s.io/apimachinery/pkg/util/intstr"}
- // special types first
- specialTypeProps := v1beta1.JSONSchemaProps{
- Description: parseDescription(comments),
- }
- for _, l := range comments {
- getValidation(l, &specialTypeProps)
- }
- switch t.Name {
- case time:
- specialTypeProps.Type = "string"
- specialTypeProps.Format = "date-time"
- return specialTypeProps, b.getTime()
- case duration:
- specialTypeProps.Type = "string"
- return specialTypeProps, b.getDuration()
- case quantity:
- specialTypeProps.Type = "string"
- return specialTypeProps, b.getQuantity()
- case meta, unstructured, rawExtension:
- specialTypeProps.Type = "object"
- return specialTypeProps, b.objSchema()
- case intOrString:
- specialTypeProps.AnyOf = []v1beta1.JSONSchemaProps{
- {
- Type: "string",
- },
- {
- Type: "integer",
- },
- }
- return specialTypeProps, b.objSchema()
- }
-
- var v v1beta1.JSONSchemaProps
- var s string
- switch t.Kind {
- case types.Builtin:
- v, s = b.parsePrimitiveValidation(t, found, comments)
- case types.Struct:
- v, s = b.parseObjectValidation(t, found, comments, isRoot)
- case types.Map:
- v, s = b.parseMapValidation(t, found, comments)
- case types.Slice:
- v, s = b.parseArrayValidation(t, found, comments)
- case types.Array:
- v, s = b.parseArrayValidation(t, found, comments)
- case types.Pointer:
- v, s = b.typeToJSONSchemaProps(t.Elem, found, comments, false)
- case types.Alias:
- v, s = b.typeToJSONSchemaProps(t.Underlying, found, comments, false)
- default:
- log.Fatalf("Unsupported Kind %v\n", t.Kind)
- }
-
- return v, s
-}
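The special cases at the top of typeToJSONSchemaProps collapse well-known API machinery types into fixed schemas: metav1.Time becomes a date-time string, and intstr.IntOrString becomes an anyOf over string and integer. A sketch of the resulting schema literals, assuming the same apiextensions v1beta1 package used throughout this file:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	// Schema produced for metav1.Time fields.
	timeSchema := v1beta1.JSONSchemaProps{Type: "string", Format: "date-time"}

	// Schema produced for intstr.IntOrString fields.
	intOrStringSchema := v1beta1.JSONSchemaProps{
		AnyOf: []v1beta1.JSONSchemaProps{{Type: "string"}, {Type: "integer"}},
	}

	for _, s := range []v1beta1.JSONSchemaProps{timeSchema, intOrStringSchema} {
		out, _ := json.Marshal(s)
		fmt.Println(string(out))
	}
}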
-
-var jsonRegex = regexp.MustCompile("json:\"([a-zA-Z0-9,]+)\"")
-
-type primitiveTemplateArgs struct {
- v1beta1.JSONSchemaProps
- Value string
- Format string
- EnumValue string // TODO check type of enum value to match the type of field
- Description string
-}
-
-var primitiveTemplate = template.Must(template.New("primitive-template").Parse(
- `v1beta1.JSONSchemaProps{
- {{ if .Pattern -}}
- Pattern: "{{ .Pattern }}",
- {{ end -}}
- {{ if .Maximum -}}
- Maximum: getFloat({{ .Maximum }}),
- {{ end -}}
- {{ if .ExclusiveMaximum -}}
- ExclusiveMaximum: {{ .ExclusiveMaximum }},
- {{ end -}}
- {{ if .Minimum -}}
- Minimum: getFloat({{ .Minimum }}),
- {{ end -}}
- {{ if .ExclusiveMinimum -}}
- ExclusiveMinimum: {{ .ExclusiveMinimum }},
- {{ end -}}
- Type: "{{ .Value }}",
- {{ if .Format -}}
- Format: "{{ .Format }}",
- {{ end -}}
- {{ if .EnumValue -}}
- Enum: {{ .EnumValue }},
- {{ end -}}
- {{ if .MaxLength -}}
- MaxLength: getInt({{ .MaxLength }}),
- {{ end -}}
- {{ if .MinLength -}}
- MinLength: getInt({{ .MinLength }}),
- {{ end -}}
-}`))
-
-// parsePrimitiveValidation returns a JSONSchemaProps object and its
-// serialization in Go that describe the validations for the given primitive
-// type.
-func (b *APIs) parsePrimitiveValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) {
- props := v1beta1.JSONSchemaProps{Type: t.Name.Name}
-
- for _, l := range comments {
- getValidation(l, &props)
- }
-
- buff := &bytes.Buffer{}
-
- var n, f, s, d string
- switch t.Name.Name {
- case "int", "int64", "uint64":
- n = "integer"
- f = "int64"
- case "int32", "uint32":
- n = "integer"
- f = "int32"
- case "float", "float32":
- n = "number"
- f = "float"
- case "float64":
- n = "number"
- f = "double"
- case "bool":
- n = "boolean"
- case "string":
- n = "string"
- f = props.Format
- default:
- n = t.Name.Name
- }
- if props.Enum != nil {
- s = parseEnumToString(props.Enum)
- }
- d = parseDescription(comments)
- if err := primitiveTemplate.Execute(buff, primitiveTemplateArgs{props, n, f, s, d}); err != nil {
- log.Fatalf("%v", err)
- }
- props.Type = n
- props.Format = f
- props.Description = d
- return props, buff.String()
-}
-
-type mapTemplateArgs struct {
- Result string
- SkipMapValidation bool
-}
-
-var mapTemplate = template.Must(template.New("map-template").Parse(
- `v1beta1.JSONSchemaProps{
- Type: "object",
- {{if not .SkipMapValidation}}AdditionalProperties: &v1beta1.JSONSchemaPropsOrBool{
- Allows: true,
- Schema: &{{.Result}},
- },{{end}}
-}`))
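Each template above renders a v1beta1.JSONSchemaProps literal as Go source text that the generator embeds into generated files. A reduced, self-contained example of the same text/template technique (demoTemplate is illustrative, not one of the generator's templates):

package main

import (
	"os"
	"text/template"
)

// demoTemplate renders a Go struct literal from a small args struct,
// just as mapTemplate and primitiveTemplate do above.
var demoTemplate = template.Must(template.New("demo-template").Parse(
	`v1beta1.JSONSchemaProps{
	Type: "{{ .Type }}",
{{ if .Format -}}
	Format: "{{ .Format }}",
{{ end -}}
}`))

func main() {
	args := struct{ Type, Format string }{Type: "string", Format: "date-time"}
	if err := demoTemplate.Execute(os.Stdout, args); err != nil {
		panic(err)
	}
}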
-
-// parseMapValidation returns a JSONSchemaProps object and its serialization in
-// Go that describe the validations for the given map type.
-func (b *APIs) parseMapValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) {
- additionalProps, result := b.typeToJSONSchemaProps(t.Elem, found, comments, false)
- additionalProps.Description = ""
- props := v1beta1.JSONSchemaProps{
- Type: "object",
- Description: parseDescription(comments),
- }
- parseOption := b.arguments.CustomArgs.(*Options)
- if !parseOption.SkipMapValidation {
- props.AdditionalProperties = &v1beta1.JSONSchemaPropsOrBool{
- Allows: true,
- Schema: &additionalProps}
- }
-
- for _, l := range comments {
- getValidation(l, &props)
- }
-
- buff := &bytes.Buffer{}
- if err := mapTemplate.Execute(buff, mapTemplateArgs{Result: result, SkipMapValidation: parseOption.SkipMapValidation}); err != nil {
- log.Fatalf("%v", err)
- }
- return props, buff.String()
-}
-
-var arrayTemplate = template.Must(template.New("array-template").Parse(
- `v1beta1.JSONSchemaProps{
- Type: "{{.Type}}",
- {{ if .Format -}}
- Format: "{{.Format}}",
- {{ end -}}
- {{ if .MaxItems -}}
- MaxItems: getInt({{ .MaxItems }}),
- {{ end -}}
- {{ if .MinItems -}}
- MinItems: getInt({{ .MinItems }}),
- {{ end -}}
- {{ if .UniqueItems -}}
- UniqueItems: {{ .UniqueItems }},
- {{ end -}}
- {{ if .Items -}}
- Items: &v1beta1.JSONSchemaPropsOrArray{
- Schema: &{{.ItemsSchema}},
- },
- {{ end -}}
-}`))
-
-type arrayTemplateArgs struct {
- v1beta1.JSONSchemaProps
- ItemsSchema string
-}
-
-// parseArrayValidation returns a JSONSchemaProps object and its serialization in
-// Go that describe the validations for the given array type.
-func (b *APIs) parseArrayValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) {
- items, result := b.typeToJSONSchemaProps(t.Elem, found, comments, false)
- items.Description = ""
- props := v1beta1.JSONSchemaProps{
- Type: "array",
- Items: &v1beta1.JSONSchemaPropsOrArray{Schema: &items},
- Description: parseDescription(comments),
- }
- // To represent byte arrays in the generated code, the property of the OpenAPI definition
- // should have string as its type and byte as its format.
- if t.Name.Name == "[]byte" {
- props.Type = "string"
- props.Format = "byte"
- props.Items = nil
- props.Description = parseDescription(comments)
- }
- for _, l := range comments {
- getValidation(l, &props)
- }
- if t.Name.Name != "[]byte" {
- // Except for the byte array special case above, the "format" property
- // should be applied to the array items and not the array itself.
- props.Format = ""
- }
- buff := &bytes.Buffer{}
- if err := arrayTemplate.Execute(buff, arrayTemplateArgs{props, result}); err != nil {
- log.Fatalf("%v", err)
- }
- return props, buff.String()
-}
-
-type objectTemplateArgs struct {
- v1beta1.JSONSchemaProps
- Fields map[string]string
- Required []string
- IsRoot bool
-}
-
-var objectTemplate = template.Must(template.New("object-template").Parse(
- `v1beta1.JSONSchemaProps{
- {{ if not .IsRoot -}}
- Type: "object",
- {{ end -}}
- Properties: map[string]v1beta1.JSONSchemaProps{
- {{ range $k, $v := .Fields -}}
- "{{ $k }}": {{ $v }},
- {{ end -}}
- },
- {{if .Required}}Required: []string{
- {{ range $k, $v := .Required -}}
- "{{ $v }}",
- {{ end -}}
- },{{ end -}}
-}`))
-
-// parseObjectValidation returns a JSONSchemaProps object and its serialization in
-// Go that describe the validations for the given object type.
-func (b *APIs) parseObjectValidation(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) { - buff := &bytes.Buffer{} - props := v1beta1.JSONSchemaProps{ - Type: "object", - Description: parseDescription(comments), - } - - for _, l := range comments { - getValidation(l, &props) - } - - if strings.HasPrefix(t.Name.String(), "k8s.io/api") { - if err := objectTemplate.Execute(buff, objectTemplateArgs{props, nil, nil, false}); err != nil { - log.Fatalf("%v", err) - } - } else { - m, result, required := b.getMembers(t, found) - props.Properties = m - props.Required = required - - if err := objectTemplate.Execute(buff, objectTemplateArgs{props, result, required, isRoot}); err != nil { - log.Fatalf("%v", err) - } - } - return props, buff.String() -} - -// getValidation parses the validation tags from the comment and sets the -// validation rules on the given JSONSchemaProps. -func getValidation(comment string, props *v1beta1.JSONSchemaProps) { - comment = strings.TrimLeft(comment, " ") - if !strings.HasPrefix(comment, "+kubebuilder:validation:") { - return - } - c := strings.Replace(comment, "+kubebuilder:validation:", "", -1) - parts := strings.Split(c, "=") - if len(parts) != 2 { - log.Fatalf("Expected +kubebuilder:validation:= actual: %s", comment) - return - } - switch parts[0] { - case "Maximum": - f, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - log.Fatalf("Could not parse float from %s: %v", comment, err) - return - } - props.Maximum = &f - case "ExclusiveMaximum": - b, err := strconv.ParseBool(parts[1]) - if err != nil { - log.Fatalf("Could not parse bool from %s: %v", comment, err) - return - } - props.ExclusiveMaximum = b - case "Minimum": - f, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - log.Fatalf("Could not parse float from %s: %v", comment, err) - return - } - props.Minimum = &f - case "ExclusiveMinimum": - b, err := strconv.ParseBool(parts[1]) - if err != nil { - log.Fatalf("Could not parse bool from %s: %v", comment, err) - return - } - props.ExclusiveMinimum = b - case "MaxLength": - i, err := strconv.Atoi(parts[1]) - v := int64(i) - if err != nil { - log.Fatalf("Could not parse int from %s: %v", comment, err) - return - } - props.MaxLength = &v - case "MinLength": - i, err := strconv.Atoi(parts[1]) - v := int64(i) - if err != nil { - log.Fatalf("Could not parse int from %s: %v", comment, err) - return - } - props.MinLength = &v - case "Pattern": - props.Pattern = parts[1] - case "MaxItems": - if props.Type == "array" { - i, err := strconv.Atoi(parts[1]) - v := int64(i) - if err != nil { - log.Fatalf("Could not parse int from %s: %v", comment, err) - return - } - props.MaxItems = &v - } - case "MinItems": - if props.Type == "array" { - i, err := strconv.Atoi(parts[1]) - v := int64(i) - if err != nil { - log.Fatalf("Could not parse int from %s: %v", comment, err) - return - } - props.MinItems = &v - } - case "UniqueItems": - if props.Type == "array" { - b, err := strconv.ParseBool(parts[1]) - if err != nil { - log.Fatalf("Could not parse bool from %s: %v", comment, err) - return - } - props.UniqueItems = b - } - case "MultipleOf": - f, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - log.Fatalf("Could not parse float from %s: %v", comment, err) - return - } - props.MultipleOf = &f - case "Enum": - if props.Type != "array" { - value := strings.Split(parts[1], ",") - enums := []v1beta1.JSON{} - for _, s := range value { - checkType(props, s, &enums) - } - props.Enum = enums - } - case "Format": - 
props.Format = parts[1]
- default:
- log.Fatalf("Unsupported validation: %s", comment)
- }
-}
-
-// getMembers builds maps by field name of the JSONSchemaProps and their Go
-// serializations.
-func (b *APIs) getMembers(t *types.Type, found sets.String) (map[string]v1beta1.JSONSchemaProps, map[string]string, []string) {
- members := map[string]v1beta1.JSONSchemaProps{}
- result := map[string]string{}
- required := []string{}
-
- // Don't allow recursion until we support it through refs
- // TODO: Support recursion
- if found.Has(t.Name.String()) {
- fmt.Printf("Breaking recursion for type %s\n", t.Name.String())
- return members, result, required
- }
- found.Insert(t.Name.String())
-
- for _, member := range t.Members {
- tags := jsonRegex.FindStringSubmatch(member.Tags)
- if len(tags) == 0 {
- // Skip fields without json tags
- //fmt.Printf("Skipping member %s %s\n", member.Name, member.Type.Name.String())
- continue
- }
- ts := strings.Split(tags[1], ",")
- name := member.Name
- strat := ""
- if len(ts) > 0 && len(ts[0]) > 0 {
- name = ts[0]
- }
- if len(ts) > 1 {
- strat = ts[1]
- }
-
- // Inline "inline" structs
- if strat == "inline" {
- m, r, re := b.getMembers(member.Type, found)
- for n, v := range m {
- members[n] = v
- }
- for n, v := range r {
- result[n] = v
- }
- required = append(required, re...)
- } else {
- m, r := b.typeToJSONSchemaProps(member.Type, found, member.CommentLines, false)
- members[name] = m
- result[name] = r
- if !strings.HasSuffix(strat, "omitempty") {
- required = append(required, name)
- }
- }
- }
-
- defer found.Delete(t.Name.String())
- return members, result, required
-}
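getMembers above derives each schema property name and the required list from the member's json struct tag: the first comma-separated element overrides the Go field name, and a field is required unless its options end in omitempty. A standalone sketch of that rule (fieldNameAndRequired is a hypothetical helper, shown only for illustration):

package main

import (
	"fmt"
	"strings"
)

// fieldNameAndRequired interprets a json struct tag value the way
// getMembers does: the first element is the serialized name, and the
// field is required unless omitempty is present.
func fieldNameAndRequired(goName, jsonTag string) (name string, required bool) {
	name = goName
	parts := strings.Split(jsonTag, ",")
	if len(parts) > 0 && parts[0] != "" {
		name = parts[0]
	}
	required = !strings.HasSuffix(jsonTag, "omitempty")
	return name, required
}

func main() {
	fmt.Println(fieldNameAndRequired("Replicas", "replicas"))       // replicas true
	fmt.Println(fieldNameAndRequired("Paused", "paused,omitempty")) // paused false
}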
diff --git a/pkg/internal/codegen/parse/index.go b/pkg/internal/codegen/parse/index.go
deleted file mode 100644
index a08cf751b..000000000
--- a/pkg/internal/codegen/parse/index.go
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package parse
-
-import (
- "fmt"
- "log"
- "strings"
-
- "github.com/markbates/inflect"
- "k8s.io/gengo/types"
- "sigs.k8s.io/controller-tools/pkg/internal/codegen"
- "sigs.k8s.io/controller-tools/pkg/internal/general"
-)
-
-// parseIndex indexes all types with the comment "// +resource=RESOURCE" by GroupVersionKind and
-// GroupKindVersion
-func (b *APIs) parseIndex() {
- // Index resource by group, version, kind
- b.ByGroupVersionKind = map[string]map[string]map[string]*codegen.APIResource{}
-
- // Index resources by group, kind, version
- b.ByGroupKindVersion = map[string]map[string]map[string]*codegen.APIResource{}
-
- // Index subresources by group, version, kind
- b.SubByGroupVersionKind = map[string]map[string]map[string]*types.Type{}
-
- for _, c := range b.context.Order {
- // The type is a subresource, add it to the subresource index
- if IsAPISubresource(c) {
- group := GetGroup(c)
- version := GetVersion(c, group)
- kind := GetKind(c, group)
- if _, f := b.SubByGroupVersionKind[group]; !f {
- b.SubByGroupVersionKind[group] = map[string]map[string]*types.Type{}
- }
- if _, f := b.SubByGroupVersionKind[group][version]; !f {
- b.SubByGroupVersionKind[group][version] = map[string]*types.Type{}
- }
- b.SubByGroupVersionKind[group][version][kind] = c
- }
-
- // If it isn't a subresource or resource, continue to the next type
- if !IsAPIResource(c) {
- continue
- }
-
- // Parse out the resource information
- r := &codegen.APIResource{
- Type: c,
- NonNamespaced: IsNonNamespaced(c),
- }
- r.Group = GetGroup(c)
- r.Version = GetVersion(c, r.Group)
- r.Kind = GetKind(c, r.Group)
- r.Domain = b.Domain
-
- // TODO: revisit this part...
- if r.Resource == "" {
- rs := inflect.NewDefaultRuleset()
- r.Resource = rs.Pluralize(strings.ToLower(r.Kind))
- }
- rt, err := parseResourceAnnotation(c)
- if err != nil {
- log.Fatalf("failed to parse resource annotations, error: %v", err.Error())
- }
- if rt.Resource != "" {
- r.Resource = rt.Resource
- }
- r.ShortName = rt.ShortName
-
- // Copy the Status strategy to mirror the non-status strategy
- r.StatusStrategy = strings.TrimSuffix(r.Strategy, "Strategy")
- r.StatusStrategy = fmt.Sprintf("%sStatusStrategy", r.StatusStrategy)
-
- // Initialize the map entries so they aren't nil
- if _, f := b.ByGroupKindVersion[r.Group]; !f {
- b.ByGroupKindVersion[r.Group] = map[string]map[string]*codegen.APIResource{}
- }
- if _, f := b.ByGroupKindVersion[r.Group][r.Kind]; !f {
- b.ByGroupKindVersion[r.Group][r.Kind] = map[string]*codegen.APIResource{}
- }
- if _, f := b.ByGroupVersionKind[r.Group]; !f {
- b.ByGroupVersionKind[r.Group] = map[string]map[string]*codegen.APIResource{}
- }
- if _, f := b.ByGroupVersionKind[r.Group][r.Version]; !f {
- b.ByGroupVersionKind[r.Group][r.Version] = map[string]*codegen.APIResource{}
- }
-
- // Add the resource to the map
- b.ByGroupKindVersion[r.Group][r.Kind][r.Version] = r
- b.ByGroupVersionKind[r.Group][r.Version][r.Kind] = r
- r.Type = c
- }
-}
-
-// resourceTags contains the tags present in a "+resource=" comment
-type resourceTags struct {
- Resource string
- REST string
- Strategy string
- ShortName string
-}
-
-// resourceAnnotationValue is a helper function to extract resource annotation.
-func resourceAnnotationValue(tag string) (resourceTags, error) {
- res := resourceTags{}
- for _, elem := range strings.Split(tag, ",") {
- key, value, err := general.ParseKV(elem)
- if err != nil {
- return resourceTags{}, fmt.Errorf("// +kubebuilder:resource: tags must be key value pairs. Expected "+
 "keys [path=<resourcepath>] "+
- "Got string: [%s]", tag)
- }
- switch key {
- case "path":
- res.Resource = value
- case "shortName":
- res.ShortName = value
- default:
- return resourceTags{}, fmt.Errorf("the given input %s is invalid", value)
- }
- }
- return res, nil
-}
-
-// parseResourceAnnotation parses the tags in a "+resource=" comment into a resourceTags struct.
-func parseResourceAnnotation(t *types.Type) (resourceTags, error) {
- finalResult := resourceTags{}
- var resourceAnnotationFound bool
- for _, comment := range t.CommentLines {
- anno := general.GetAnnotation(comment, "kubebuilder:resource")
- if len(anno) == 0 {
- continue
- }
- result, err := resourceAnnotationValue(anno)
- if err != nil {
- return resourceTags{}, err
- }
- if resourceAnnotationFound {
- return resourceTags{}, fmt.Errorf("resource annotation should only exist once per type")
- }
- resourceAnnotationFound = true
- finalResult = result
- }
- return finalResult, nil
-}
diff --git a/pkg/internal/codegen/parse/parser.go b/pkg/internal/codegen/parse/parser.go
deleted file mode 100644
index c7a55dd3d..000000000
--- a/pkg/internal/codegen/parse/parser.go
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package parse
-
-import (
- "bufio"
- "go/build"
- "log"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/pkg/errors"
- rbacv1 "k8s.io/api/rbac/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/gengo/args"
- "k8s.io/gengo/generator"
- "k8s.io/gengo/types"
- "sigs.k8s.io/controller-tools/pkg/internal/codegen"
-)
-
-// APIs holds the information about a collection of APIs.
-type APIs struct {
- context *generator.Context
- arguments *args.GeneratorArgs
- Domain string
- VersionedPkgs sets.String
- UnversionedPkgs sets.String
- APIsPkg string
- APIsPkgRaw *types.Package
- GroupNames sets.String
-
- APIs *codegen.APIs
- Controllers []codegen.Controller
-
- ByGroupKindVersion map[string]map[string]map[string]*codegen.APIResource
- ByGroupVersionKind map[string]map[string]map[string]*codegen.APIResource
- SubByGroupVersionKind map[string]map[string]map[string]*types.Type
- Groups map[string]types.Package
- Rules []rbacv1.PolicyRule
- Informers map[v1.GroupVersionKind]bool
-}
-
-// NewAPIs returns a new APIs instance with given context.
-func NewAPIs(context *generator.Context, arguments *args.GeneratorArgs, domain, apisPkg string) *APIs {
- b := &APIs{
- context: context,
- arguments: arguments,
- Domain: domain,
- APIsPkg: apisPkg,
- }
- b.parsePackages()
- b.parseGroupNames()
- b.parseIndex()
- b.parseAPIs()
- b.parseCRDs()
- if len(b.Domain) == 0 {
- b.parseDomain()
- }
- return b
-}
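NewAPIs runs the parsing phases in a fixed order because each phase reads the indexes built by the ones before it: parsePackages below derives the unversioned package for every versioned package, and parseGroupNames then takes the group from its base name. A sketch of that path arithmetic (the package path is made up purely for illustration):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// A versioned API package like .../apis/ship/v1beta1 yields the
	// unversioned package .../apis/ship; its base name is the group.
	versioned := "example.com/project/pkg/apis/ship/v1beta1"
	unversioned := filepath.Dir(versioned)
	group := filepath.Base(unversioned)
	fmt.Println(unversioned) // example.com/project/pkg/apis/ship
	fmt.Println(group)       // ship
}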
-
-// parseGroupNames initializes b.GroupNames with the set of all groups
-func (b *APIs) parseGroupNames() {
- b.GroupNames = sets.String{}
- for p := range b.UnversionedPkgs {
- pkg := b.context.Universe[p]
- if pkg == nil {
- // If the input had no Go files, for example.
- continue
- }
- b.GroupNames.Insert(filepath.Base(p))
- }
-}
-
-// parsePackages parses out the sets of Versioned, Unversioned packages and identifies the root Apis package.
-func (b *APIs) parsePackages() {
- b.VersionedPkgs = sets.NewString()
- b.UnversionedPkgs = sets.NewString()
- for _, o := range b.context.Order {
- if IsAPIResource(o) {
- versioned := o.Name.Package
- b.VersionedPkgs.Insert(versioned)
-
- unversioned := filepath.Dir(versioned)
- b.UnversionedPkgs.Insert(unversioned)
- }
- }
-}
-
-// parseDomain parses the domain from the apis/doc.go file comment "// +domain=YOUR_DOMAIN".
-func (b *APIs) parseDomain() {
- pkg := b.context.Universe[b.APIsPkg]
- if pkg == nil {
- // If the input had no Go files, for example.
- panic(errors.Errorf("Missing apis package."))
- }
- comments := Comments(pkg.Comments)
- b.Domain = comments.getTag("domain", "=")
- if len(b.Domain) == 0 {
- b.Domain = parseDomainFromFiles(b.context.Inputs)
- if len(b.Domain) == 0 {
- panic("Could not find string matching // +domain=.+ in apis/doc.go")
- }
- }
-}
-
-func parseDomainFromFiles(paths []string) string {
- var domain string
- for _, path := range paths {
- if strings.HasSuffix(path, "pkg/apis") {
- filePath := strings.Join([]string{build.Default.GOPATH, "src", path, "doc.go"}, "/")
- lines := []string{}
-
- file, err := os.Open(filePath)
- if err != nil {
- log.Fatal(err)
- }
- defer file.Close()
- scanner := bufio.NewScanner(file)
- for scanner.Scan() {
- if strings.HasPrefix(scanner.Text(), "//") {
- lines = append(lines, strings.Replace(scanner.Text(), "// ", "", 1))
- }
- }
- if err := scanner.Err(); err != nil {
- log.Fatal(err)
- }
-
- comments := Comments(lines)
- domain = comments.getTag("domain", "=")
- break
- }
- }
- return domain
-}
diff --git a/pkg/internal/codegen/parse/util.go b/pkg/internal/codegen/parse/util.go
deleted file mode 100644
index 7df445353..000000000
--- a/pkg/internal/codegen/parse/util.go
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package parse
-
-import (
- "fmt"
- "log"
- "path/filepath"
- "strconv"
- "strings"
-
- "github.com/pkg/errors"
- "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
- "k8s.io/gengo/types"
-)
-
-const (
- specReplicasPath = "specpath"
- statusReplicasPath = "statuspath"
- labelSelectorPath = "selectorpath"
- jsonPathError = "invalid scale path. specpath, statuspath key-value pairs are required, only selectorpath key-value is optional. For example: // +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath=.spec.Label"
- printColumnName = "name"
- printColumnType = "type"
- printColumnDescr = "description"
- printColumnPath = "JSONPath"
- printColumnFormat = "format"
- printColumnPri = "priority"
- printColumnError = "invalid printcolumn path. name, type, and JSONPath are required key-value pairs and the rest of the fields are optional. 
For example: // +kubebuilder:printcolumn:name=abc,type=string,JSONPath=status" -) - -// Options contains the parser options -type Options struct { - SkipMapValidation bool - - // SkipRBACValidation flag determines whether to check RBAC annotations - // for the controller or not at parse stage. - SkipRBACValidation bool -} - -// IsAPIResource returns true if either of the two conditions become true: -// 1. t has a +resource/+kubebuilder:resource comment tag -// 2. t has TypeMeta and ObjectMeta in its member list. -func IsAPIResource(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+resource") || strings.Contains(c, "+kubebuilder:resource") { - return true - } - } - - typeMetaFound, objMetaFound := false, false - for _, m := range t.Members { - if m.Name == "TypeMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta" { - typeMetaFound = true - } - if m.Name == "ObjectMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta" { - objMetaFound = true - } - if typeMetaFound && objMetaFound { - return true - } - } - return false -} - -// IsNonNamespaced returns true if t has a +nonNamespaced comment tag -func IsNonNamespaced(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - - for _, c := range t.CommentLines { - if strings.Contains(c, "+genclient:nonNamespaced") { - return true - } - } - - for _, c := range t.SecondClosestCommentLines { - if strings.Contains(c, "+genclient:nonNamespaced") { - return true - } - } - - return false -} - -// IsController returns true if t has a +controller or +kubebuilder:controller tag -func IsController(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+controller") || strings.Contains(c, "+kubebuilder:controller") { - return true - } - } - return false -} - -// IsRBAC returns true if t has a +rbac or +kubebuilder:rbac tag -func IsRBAC(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+rbac") || strings.Contains(c, "+kubebuilder:rbac") { - return true - } - } - return false -} - -// hasPrintColumn returns true if t has a +printcolumn or +kubebuilder:printcolumn annotation. 
-func hasPrintColumn(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+printcolumn") || strings.Contains(c, "+kubebuilder:printcolumn") { - return true - } - } - return false -} - -// IsInformer returns true if t has a +informers or +kubebuilder:informers tag -func IsInformer(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+informers") || strings.Contains(c, "+kubebuilder:informers") { - return true - } - } - return false -} - -// IsAPISubresource returns true if t has a +subresource-request comment tag -func IsAPISubresource(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+subresource-request") { - return true - } - } - return false -} - -// HasSubresource returns true if t is an APIResource with one or more Subresources -func HasSubresource(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines { - if strings.Contains(c, "subresource") { - return true - } - } - return false -} - -// hasStatusSubresource returns true if t is an APIResource annotated with -// +kubebuilder:subresource:status -func hasStatusSubresource(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:subresource:status") { - return true - } - } - return false -} - -// hasScaleSubresource returns true if t is an APIResource annotated with -// +kubebuilder:subresource:scale -func hasScaleSubresource(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:subresource:scale") { - return true - } - } - return false -} - -// hasCategories returns true if t is an APIResource annotated with -// +kubebuilder:categories -func hasCategories(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:categories") { - return true - } - } - return false -} - -// HasDocAnnotation returns true if t is an APIResource with doc annotation -// +kubebuilder:doc -func HasDocAnnotation(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:doc") { - return true - } - } - return false -} - -// hasSingular returns true if t is an APIResource annotated with -// +kubebuilder:singular -func hasSingular(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines{ - if strings.Contains(c, "+kubebuilder:singular"){ - return true - } - } - return false -} - -// IsUnversioned returns true if t is in given group, and not in versioned path. -func IsUnversioned(t *types.Type, group string) bool { - return IsApisDir(filepath.Base(filepath.Dir(t.Name.Package))) && GetGroup(t) == group -} - -// IsVersioned returns true if t is in given group, and in versioned path. -func IsVersioned(t *types.Type, group string) bool { - dir := filepath.Base(filepath.Dir(filepath.Dir(t.Name.Package))) - return IsApisDir(dir) && GetGroup(t) == group -} - -// GetVersion returns version of t. -func GetVersion(t *types.Type, group string) string { - if !IsVersioned(t, group) { - panic(errors.Errorf("Cannot get version for unversioned type %v", t.Name)) - } - return filepath.Base(t.Name.Package) -} - -// GetGroup returns group of t. 
-func GetGroup(t *types.Type) string { - return filepath.Base(GetGroupPackage(t)) -} - -// GetGroupPackage returns group package of t. -func GetGroupPackage(t *types.Type) string { - if IsApisDir(filepath.Base(filepath.Dir(t.Name.Package))) { - return t.Name.Package - } - return filepath.Dir(t.Name.Package) -} - -// GetKind returns kind of t. -func GetKind(t *types.Type, group string) string { - if !IsVersioned(t, group) && !IsUnversioned(t, group) { - panic(errors.Errorf("Cannot get kind for type not in group %v", t.Name)) - } - return t.Name.Name -} - -// IsApisDir returns true if a directory path is a Kubernetes api directory -func IsApisDir(dir string) bool { - return dir == "apis" || dir == "api" -} - -// Comments is a structure for using comment tags on go structs and fields -type Comments []string - -// GetTags returns the value for the first comment with a prefix matching "+name=" -// e.g. "+name=foo\n+name=bar" would return "foo" -func (c Comments) getTag(name, sep string) string { - for _, c := range c { - prefix := fmt.Sprintf("+%s%s", name, sep) - if strings.HasPrefix(c, prefix) { - return strings.Replace(c, prefix, "", 1) - } - } - return "" -} - -// hasTag returns true if the Comments has a tag with the given name -func (c Comments) hasTag(name string) bool { - for _, c := range c { - prefix := fmt.Sprintf("+%s", name) - if strings.HasPrefix(c, prefix) { - return true - } - } - return false -} - -// GetTags returns the value for all comments with a prefix and separator. E.g. for "name" and "=" -// "+name=foo\n+name=bar" would return []string{"foo", "bar"} -func (c Comments) getTags(name, sep string) []string { - tags := []string{} - for _, c := range c { - prefix := fmt.Sprintf("+%s%s", name, sep) - if strings.HasPrefix(c, prefix) { - tags = append(tags, strings.Replace(c, prefix, "", 1)) - } - } - return tags -} - -// getCategoriesTag returns the value of the +kubebuilder:categories tags -func getCategoriesTag(c *types.Type) string { - comments := Comments(c.CommentLines) - resource := comments.getTag("kubebuilder:categories", "=") - if len(resource) == 0 { - panic(errors.Errorf("Must specify +kubebuilder:categories comment for type %v", c.Name)) - } - return resource -} - -// getSingularName returns the value of the +kubebuilder:singular tag -func getSingularName(c *types.Type) string { - comments := Comments(c.CommentLines) - singular := comments.getTag("kubebuilder:singular", "=") - if len(singular) == 0 { - panic(errors.Errorf("Must specify a value to use with +kubebuilder:singular comment for type %v", c.Name)) - } - return singular -} - -// getDocAnnotation parse annotations of "+kubebuilder:doc:" with tags of "warning" or "doc" for control generating doc config. -// E.g. +kubebuilder:doc:warning=foo +kubebuilder:doc:note=bar -func getDocAnnotation(t *types.Type, tags ...string) map[string]string { - annotation := make(map[string]string) - for _, tag := range tags { - for _, c := range t.CommentLines { - prefix := fmt.Sprintf("+kubebuilder:doc:%s=", tag) - if strings.HasPrefix(c, prefix) { - annotation[tag] = strings.Replace(c, prefix, "", 1) - } - } - } - return annotation -} - -// parseByteValue returns the literal digital number values from a byte array -func parseByteValue(b []byte) string { - elem := strings.Join(strings.Fields(fmt.Sprintln(b)), ",") - elem = strings.TrimPrefix(elem, "[") - elem = strings.TrimSuffix(elem, "]") - return elem -} - -// parseDescription parse comments above each field in the type definition. 
-func parseDescription(res []string) string { - var temp strings.Builder - var desc string - for _, comment := range res { - if !(strings.Contains(comment, "+kubebuilder") || strings.Contains(comment, "+optional")) { - temp.WriteString(comment) - temp.WriteString(" ") - desc = strings.TrimRight(temp.String(), " ") - } - } - return desc -} - -// parseEnumToString returns a representive validated go format string from JSONSchemaProps schema -func parseEnumToString(value []v1beta1.JSON) string { - res := "[]v1beta1.JSON{" - prefix := "v1beta1.JSON{[]byte{" - for _, v := range value { - res = res + prefix + parseByteValue(v.Raw) + "}}," - } - return strings.TrimSuffix(res, ",") + "}" -} - -// check type of enum element value to match type of field -func checkType(props *v1beta1.JSONSchemaProps, s string, enums *[]v1beta1.JSON) { - - // TODO support more types check - switch props.Type { - case "int", "int64", "uint64": - if _, err := strconv.ParseInt(s, 0, 64); err != nil { - log.Fatalf("Invalid integer value [%v] for a field of integer type", s) - } - *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) - case "int32", "unit32": - if _, err := strconv.ParseInt(s, 0, 32); err != nil { - log.Fatalf("Invalid integer value [%v] for a field of integer32 type", s) - } - *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) - case "float", "float32": - if _, err := strconv.ParseFloat(s, 32); err != nil { - log.Fatalf("Invalid float value [%v] for a field of float32 type", s) - } - *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) - case "float64": - if _, err := strconv.ParseFloat(s, 64); err != nil { - log.Fatalf("Invalid float value [%v] for a field of float type", s) - } - *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) - case "string": - *enums = append(*enums, v1beta1.JSON{Raw: []byte(`"` + s + `"`)}) - } -} - -// Scale subresource requires specpath, statuspath, selectorpath key values, represents for JSONPath of -// SpecReplicasPath, StatusReplicasPath, LabelSelectorPath separately. e.g. -// +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath= -func parseScaleParams(t *types.Type) (map[string]string, error) { - jsonPath := make(map[string]string) - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:subresource:scale") { - paths := strings.Replace(c, "+kubebuilder:subresource:scale:", "", -1) - path := strings.Split(paths, ",") - if len(path) < 2 { - return nil, fmt.Errorf(jsonPathError) - } - for _, s := range path { - kv := strings.Split(s, "=") - if kv[0] == specReplicasPath || kv[0] == statusReplicasPath || kv[0] == labelSelectorPath { - jsonPath[kv[0]] = kv[1] - } else { - return nil, fmt.Errorf(jsonPathError) - } - } - var ok bool - _, ok = jsonPath[specReplicasPath] - if !ok { - return nil, fmt.Errorf(jsonPathError) - } - _, ok = jsonPath[statusReplicasPath] - if !ok { - return nil, fmt.Errorf(jsonPathError) - } - return jsonPath, nil - } - } - return nil, fmt.Errorf(jsonPathError) -} - -// printColumnKV parses key-value string formatted as "foo=bar" and returns key and value. 
-func printColumnKV(s string) (key, value string, err error) { - kv := strings.SplitN(s, "=", 2) - if len(kv) != 2 { - err = fmt.Errorf("invalid key value pair") - return key, value, err - } - key, value = kv[0], kv[1] - if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") { - value = value[1 : len(value)-1] - } - return key, value, err -} - -// helperPrintColumn is a helper function for the parsePrintColumnParams to compute printer columns. -func helperPrintColumn(parts string, comment string) (v1beta1.CustomResourceColumnDefinition, error) { - config := v1beta1.CustomResourceColumnDefinition{} - var count int - part := strings.Split(parts, ",") - if len(part) < 3 { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) - } - - for _, elem := range strings.Split(parts, ",") { - key, value, err := printColumnKV(elem) - if err != nil { - return v1beta1.CustomResourceColumnDefinition{}, - fmt.Errorf("//+kubebuilder:printcolumn: tags must be key value pairs.Expected "+ - "keys [name=,type=,description=,format=] "+ - "Got string: [%s]", parts) - } - if key == printColumnName || key == printColumnType || key == printColumnPath { - count++ - } - switch key { - case printColumnName: - config.Name = value - case printColumnType: - if value == "integer" || value == "number" || value == "string" || value == "boolean" || value == "date" { - config.Type = value - } else { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnType) - } - case printColumnFormat: - if config.Type == "integer" && (value == "int32" || value == "int64") { - config.Format = value - } else if config.Type == "number" && (value == "float" || value == "double") { - config.Format = value - } else if config.Type == "string" && (value == "byte" || value == "date" || value == "date-time" || value == "password") { - config.Format = value - } else { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnFormat) - } - case printColumnPath: - config.JSONPath = value - case printColumnPri: - i, err := strconv.Atoi(value) - v := int32(i) - if err != nil { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnPri) - } - config.Priority = v - case printColumnDescr: - config.Description = value - default: - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) - } - } - if count != 3 { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) - } - return config, nil -} - -// printcolumn requires name,type,JSONPath fields and rest of the field are optional -// +kubebuilder:printcolumn:name=,type=,description=,JSONPath:<.spec.Name>,priority=,format= -func parsePrintColumnParams(t *types.Type) ([]v1beta1.CustomResourceColumnDefinition, error) { - result := []v1beta1.CustomResourceColumnDefinition{} - for _, comment := range t.CommentLines { - if strings.Contains(comment, "+kubebuilder:printcolumn") { - parts := strings.Replace(comment, "+kubebuilder:printcolumn:", "", -1) - res, err := helperPrintColumn(parts, comment) - if err != nil { - return []v1beta1.CustomResourceColumnDefinition{}, err - } - result = append(result, res) - } - } - return result, nil -} diff --git a/pkg/internal/codegen/parse/util_test.go b/pkg/internal/codegen/parse/util_test.go deleted file mode 100644 index baec23a63..000000000 --- a/pkg/internal/codegen/parse/util_test.go +++ /dev/null @@ -1,216 +0,0 @@ -/* -Copyright 2018 
The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "fmt" - "reflect" - "testing" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/gengo/types" -) - -func TestParseScaleParams(t *testing.T) { - testCases := []struct { - name string - tag string - expected map[string]string - parseErr error - }{ - { - name: "test ok", - tag: "+kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath=.spec.Label", - expected: map[string]string{ - specReplicasPath: ".spec.replica", - statusReplicasPath: ".status.replica", - labelSelectorPath: ".spec.Label", - }, - parseErr: nil, - }, - { - name: "test ok without selectorpath", - tag: "+kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica", - expected: map[string]string{ - specReplicasPath: ".spec.replica", - statusReplicasPath: ".status.replica", - }, - parseErr: nil, - }, - { - name: "test ok selectorpath has empty value", - tag: "+kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath=", - expected: map[string]string{ - specReplicasPath: ".spec.replica", - statusReplicasPath: ".status.replica", - labelSelectorPath: "", - }, - parseErr: nil, - }, - { - name: "test no jsonpath", - tag: "+kubebuilder:subresource:scale", - expected: nil, - parseErr: fmt.Errorf(jsonPathError), - }, - { - name: "test no specpath", - tag: "+kubebuilder:subresource:scale:statuspath=.status.replica,selectorpath=.jsonpath", - expected: nil, - parseErr: fmt.Errorf(jsonPathError), - }, - { - name: "test no statuspath", - tag: "+kubebuilder:subresource:scale:specpath=.spec.replica,selectorpath=.jsonpath", - expected: nil, - parseErr: fmt.Errorf(jsonPathError), - }, - { - name: "test statuspath is empty string", - tag: "+kubebuilder:subresource:scale:statuspath=,selectorpath=.jsonpath", - expected: nil, - parseErr: fmt.Errorf(jsonPathError), - }, - { - name: "test scale jsonpath has incorrect separator", - tag: "+kubebuilder:subresource:scale,specpath=.spec.replica,statuspath=.jsonpath,selectorpath=.jsonpath", - expected: nil, - parseErr: fmt.Errorf(jsonPathError), - }, - { - name: "test scale jsonpath has extra separator", - tag: "+kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replicas,selectorpath=.jsonpath,", - expected: nil, - parseErr: fmt.Errorf(jsonPathError), - }, - { - name: "test scale jsonpath has incorrect separator in-between key value pairs", - tag: "+kubebuilder:subresource:scale:specpath=.spec.replica;statuspath=.jsonpath;selectorpath=.jsonpath", - expected: nil, - parseErr: fmt.Errorf(jsonPathError), - }, - { - name: "test unsupported key value pairs", - tag: "+kubebuilder:subresource:scale:name=test,specpath=.spec.replica,statuspath=.status.replicas,selectorpath=.jsonpath", - expected: nil, - parseErr: fmt.Errorf(jsonPathError), - }, - } - - for _, tc := range testCases { - t.Logf("test case: %s", tc.name) - r := &types.Type{} - r.CommentLines = []string{tc.tag} - res, err := 
parseScaleParams(r) - if !reflect.DeepEqual(err, tc.parseErr) { - t.Errorf("test [%s] failed. error is (%v),\n but expected (%v)", tc.name, err, tc.parseErr) - } - if !reflect.DeepEqual(res, tc.expected) { - t.Errorf("test [%s] failed. result is (%v),\n but expected (%v)", tc.name, res, tc.expected) - } - } -} -func TestParsePrintColumnParams(t *testing.T) { - tests := []struct { - name string - tag string - expected []v1beta1.CustomResourceColumnDefinition - parseErr error - }{ - { - name: "test ok", - tag: "+kubebuilder:printcolumn:name=toy,type=string,JSONPath=.status.conditions[?(@.type==\"Ready\")].status,description=descr1,priority=3,format=date", - expected: []v1beta1.CustomResourceColumnDefinition{ - { - Name: "toy", - Type: "string", - Format: "date", - Description: "descr1", - Priority: 3, - JSONPath: ".status.conditions[?(@.type==\"Ready\")].status", - }, - }, - parseErr: nil, - }, - { - name: "age as date", - tag: "+kubebuilder:printcolumn:name=age,type=date,JSONPath=.metadata.creationTimestamp", - expected: []v1beta1.CustomResourceColumnDefinition{ - { - Name: "age", - Type: "date", - JSONPath: ".metadata.creationTimestamp", - }, - }, - parseErr: nil, - }, - { - name: "Minimum Three parameters", - tag: "+kubebuilder:printcolumn:name=toy,type=string,JSONPath=.status.conditions[?(@.type==\"Ready\")].status", - expected: []v1beta1.CustomResourceColumnDefinition{ - { - Name: "toy", - Type: "string", - JSONPath: ".status.conditions[?(@.type==\"Ready\")].status", - }, - }, - parseErr: nil, - }, - { - name: "two parameters", - tag: "+kubebuilder:printcolumn:name=toy,type=string", - expected: []v1beta1.CustomResourceColumnDefinition{}, - parseErr: fmt.Errorf(printColumnError), - }, - { - name: "one requied parameter is missing", - tag: "+kubebuilder:printcolumn:name=toy,type=string,description=.status.conditions[?(@.type==\"Ready\")].status", - expected: []v1beta1.CustomResourceColumnDefinition{}, - parseErr: fmt.Errorf(printColumnError), - }, - { - name: "Invalid value for priority", - tag: "+kubebuilder:printcolumn:name=toy,type=string,description=.status.conditions[?(@.type==\"Ready\")].status,priority=1.23", - expected: []v1beta1.CustomResourceColumnDefinition{}, - parseErr: fmt.Errorf("invalid value for %s printcolumn", printColumnPri), - }, - { - name: "Invalid value for format", - tag: "+kubebuilder:printcolumn:name=toy,type=string,description=.status.conditions[?(@.type==\"Ready\")].status,format=float", - expected: []v1beta1.CustomResourceColumnDefinition{}, - parseErr: fmt.Errorf("invalid value for %s printcolumn", printColumnFormat), - }, - { - name: "Invalid value for type", - tag: "+kubebuilder:printcolumn:name=toy,type=int32,description=.status.conditions[?(@.type==\"Ready\")].status", - expected: []v1beta1.CustomResourceColumnDefinition{}, - parseErr: fmt.Errorf("invalid value for %s printcolumn", printColumnType), - }, - } - for _, tc := range tests { - t.Logf("test case: %s", tc.name) - r := &types.Type{} - r.CommentLines = []string{tc.tag} - res, err := parsePrintColumnParams(r) - if !reflect.DeepEqual(err, tc.parseErr) { - t.Errorf("test [%s] failed. error is (%v),\n but expected (%v)", tc.name, err, tc.parseErr) - } - if !reflect.DeepEqual(res, tc.expected) { - t.Errorf("test [%s] failed. 
result is (%v),\n but expected (%v)", tc.name, res, tc.expected) - } - } -} diff --git a/pkg/internal/codegen/types.go b/pkg/internal/codegen/types.go deleted file mode 100644 index e4fda4c7e..000000000 --- a/pkg/internal/codegen/types.go +++ /dev/null @@ -1,213 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package codegen - -import ( - "sort" - - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/gengo/types" -) - -// APIs is the information of a collection of API -type APIs struct { - // Domain is the domain portion of the group - e.g. k8s.io - Domain string - - // Package is the name of the root API package - e.g. github.com/my-org/my-repo/pkg/apis - Package string - - // Pkg the Package for the root API package - Pkg *types.Package - - // Groups is the list of API groups found under the apis package - Groups map[string]*APIGroup - - Rules []rbacv1.PolicyRule - - Informers map[v1.GroupVersionKind]bool -} - -// GetRules get rules of the APIs -func (apis *APIs) GetRules() []rbacv1.PolicyRule { - rules := []rbacv1.PolicyRule{} - rulesIndex := map[v1.GroupResource]sets.String{} - for _, rule := range apis.Rules { - for _, g := range rule.APIGroups { - for _, r := range rule.Resources { - gr := v1.GroupResource{ - Group: g, - Resource: r, - } - if _, found := rulesIndex[gr]; !found { - rulesIndex[gr] = sets.NewString() - } - rulesIndex[gr].Insert(rule.Verbs...) - } - } - } - for gr, v := range rulesIndex { - verbs := v.List() - sort.Strings(verbs) - rule := rbacv1.PolicyRule{ - Resources: []string{gr.Resource}, - APIGroups: []string{gr.Group}, - Verbs: verbs, - } - rules = append(rules, rule) - } - return rules -} - -// APIGroup contains information of an API group. -type APIGroup struct { - // Package is the name of the go package the api group is under - e.g. github.com/me/apiserver-helloworld/apis - Package string - // Domain is the domain portion of the group - e.g. k8s.io - Domain string - // Group is the short name of the group - e.g. mushroomkingdom - Group string - GroupTitle string - // Versions is the list of all versions for this group keyed by name - Versions map[string]*APIVersion - - UnversionedResources map[string]*APIResource - - // Structs is a list of unversioned definitions that must be generated - Structs []*Struct - Pkg *types.Package - PkgPath string -} - -// Struct contains information of a struct. -type Struct struct { - // Name is the name of the type - Name string - // genClient - GenClient bool - GenDeepCopy bool - NonNamespaced bool - - GenUnversioned bool - // Fields is the list of fields appearing in the struct - Fields []*Field -} - -// Field contains information of a field. 
-type Field struct { - // Name is the name of the field - Name string - // For versioned Kubernetes types, this is the versioned package - VersionedPackage string - // For versioned Kubernetes types, this is the unversioned package - UnversionedImport string - UnversionedType string -} - -// APIVersion contains information of an API version. -type APIVersion struct { - // Domain is the group domain - e.g. k8s.io - Domain string - // Group is the group name - e.g. mushroomkingdom - Group string - // Version is the api version - e.g. v1beta1 - Version string - // Resources is a list of resources appearing in the API version keyed by name - Resources map[string]*APIResource - // Pkg is the Package object from code-gen - Pkg *types.Package -} - -// APIResource contains information of an API resource. -type APIResource struct { - // Domain is the group domain - e.g. k8s.io - Domain string - // Group is the group name - e.g. mushroomkingdom - Group string - // Version is the api version - e.g. v1beta1 - Version string - // Kind is the resource name - e.g. PeachesCastle - Kind string - // Resource is the resource name - e.g. peachescastles - Resource string - // REST is the rest.Storage implementation used to handle requests - // This field is optional. The standard REST implementation will be used - // by default. - REST string - // Subresources is a map of subresources keyed by name - Subresources map[string]*APISubresource - // Type is the Type object from code-gen - Type *types.Type - // Strategy is name of the struct to use for the strategy - Strategy string - // Strategy is name of the struct to use for the strategy - StatusStrategy string - // NonNamespaced indicates that the resource kind is non namespaced - NonNamespaced bool - - ShortName string - - JSONSchemaProps v1beta1.JSONSchemaProps - CRD v1beta1.CustomResourceDefinition - Validation string - ValidationComments string - // DocAnnotation is a map of annotations by name for doc. e.g. warning, notes message - DocAnnotation map[string]string - // Categories is a list of categories the resource is part of. - Categories []string -} - -// APISubresource contains information of an API subresource. -type APISubresource struct { - // Domain is the group domain - e.g. k8s.io - Domain string - // Group is the group name - e.g. mushroomkingdom - Group string - // Version is the api version - e.g. v1beta1 - Version string - // Kind is the resource name - e.g. PeachesCastle - Kind string - // Resource is the resource name - e.g. peachescastles - Resource string - // Request is the subresource request type - e.g. ScaleCastle - Request string - // REST is the rest.Storage implementation used to handle requests - REST string - // Path is the subresource path - e.g. scale - Path string - - // ImportPackage is the import statement that must appear for the Request - ImportPackage string - - // RequestType is the type of the request - RequestType *types.Type - - // RESTType is the type of the request handler - RESTType *types.Type -} - -// Controller contains information of a controller. -type Controller struct { - Target schema.GroupVersionKind - Resource string - Pkg *types.Package - Repo string -} diff --git a/pkg/internal/general/util.go b/pkg/internal/general/util.go deleted file mode 100644 index afa889e36..000000000 --- a/pkg/internal/general/util.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package general - -import ( - "fmt" - "go/ast" - "go/parser" - "go/token" - "os" - "path/filepath" - "strings" -) - -// isGoFile filters files from parsing. -func isGoFile(f os.FileInfo) bool { - // ignore non-Go or Go test files - name := f.Name() - return !f.IsDir() && - !strings.HasPrefix(name, ".") && - !strings.HasSuffix(name, "_test.go") && - strings.HasSuffix(name, ".go") -} - -// GetAnnotation extracts the annotation from comment text. -// It will return "foo" for comment "+kubebuilder:webhook:foo" . -func GetAnnotation(c, name string) string { - prefix := fmt.Sprintf("+%s:", name) - if strings.HasPrefix(c, prefix) { - return strings.TrimPrefix(c, prefix) - } - return "" -} - -// ParseKV parses key-value string formatted as "foo=bar" and returns key and value. -func ParseKV(s string) (key, value string, err error) { - kv := strings.Split(s, "=") - if len(kv) != 2 { - err = fmt.Errorf("invalid key value pair") - return key, value, err - } - key, value = kv[0], kv[1] - if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") { - value = value[1 : len(value)-1] - } - return key, value, err -} - -// ParseDir parses the Go files under given directory and parses the annotation by -// invoking the parseFn function on each comment group (multi-lines comments). -// TODO(droot): extend it to multiple dirs -func ParseDir(dir string, parseFn func(string) error) error { - fset := token.NewFileSet() - - err := filepath.Walk(dir, - func(path string, info os.FileInfo, _ error) error { - if !isGoFile(info) { - // TODO(droot): enable this output based on verbose flag - // fmt.Println("skipping non-go file", path) - return nil - } - return ParseFile(fset, path, nil, parseFn) - }) - return err -} - -// ParseFile parses given filename or content src and parses annotations by -// invoking the parseFn function on each comment group (multi-lines comments). -func ParseFile(fset *token.FileSet, filename string, src interface{}, parseFn func(string) error) error { - f, err := parser.ParseFile(fset, filename, src, parser.ParseComments) - if err != nil { - fmt.Printf("error from parse.ParseFile: %v", err) - return err - } - - // using commentMaps here because it sanitizes the comment text by removing - // comment markers, compresses newlines etc. - cmap := ast.NewCommentMap(fset, f, f.Comments) - - for _, commentGroup := range cmap.Comments() { - err = parseFn(commentGroup.Text()) - if err != nil { - fmt.Print("error when parsing annotation") - return err - } - } - return nil -} diff --git a/pkg/rbac/manifests.go b/pkg/rbac/manifests.go deleted file mode 100644 index 629480ec1..000000000 --- a/pkg/rbac/manifests.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rbac - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/ghodss/yaml" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-tools/pkg/internal/general" -) - -// ManifestOptions represent options for generating the RBAC manifests. -type ManifestOptions struct { - InputDir string - OutputDir string - RoleFile string - BindingFile string - Name string - ServiceAccount string - Namespace string - Labels map[string]string -} - -// SetDefaults sets up the default options for RBAC Manifest generator. -func (o *ManifestOptions) SetDefaults() { - o.Name = "manager" - o.InputDir = filepath.Join(".", "pkg") - o.OutputDir = filepath.Join(".", "config", "rbac") - o.ServiceAccount = "default" - o.Namespace = "system" -} - -// RoleName returns the RBAC role name to be used in the manifests. -func (o *ManifestOptions) RoleName() string { - return o.Name + "-role" -} - -// RoleFileName returns the name of the manifest file to use for the role. -func (o *ManifestOptions) RoleFileName() string { - if len(o.RoleFile) == 0 { - return o.Name + "_role.yaml" - } - // TODO: validate file name - return o.RoleFile -} - -// RoleBindingName returns the RBAC role binding name to be used in the manifests. -func (o *ManifestOptions) RoleBindingName() string { - return o.Name + "-rolebinding" -} - -// RoleBindingFileName returns the name of the manifest file to use for the role binding. -func (o *ManifestOptions) RoleBindingFileName() string { - if len(o.BindingFile) == 0 { - return o.Name + "_role_binding.yaml" - } - // TODO: validate file name - return o.BindingFile -} - -// Validate validates the input options. -func (o *ManifestOptions) Validate() error { - if _, err := os.Stat(o.InputDir); err != nil { - return fmt.Errorf("invalid input directory '%s' %v", o.InputDir, err) - } - return nil -} - -// Generate generates RBAC manifests by parsing the RBAC annotations in Go source -// files specified in the input directory. 
-func Generate(o *ManifestOptions) error { - if err := o.Validate(); err != nil { - return err - } - - ops := parserOptions{ - rules: []rbacv1.PolicyRule{}, - } - err := general.ParseDir(o.InputDir, ops.parseAnnotation) - if err != nil { - return fmt.Errorf("failed to parse the input dir %v", err) - } - if len(ops.rules) == 0 { - return nil - } - roleManifest, err := getClusterRoleManifest(ops.rules, o) - if err != nil { - return fmt.Errorf("failed to generate role manifest %v", err) - } - - roleBindingManifest, err := getClusterRoleBindingManifest(o) - if err != nil { - return fmt.Errorf("failed to generate role binding manifests %v", err) - } - - err = os.MkdirAll(o.OutputDir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create output dir %v", err) - } - roleManifestFile := filepath.Join(o.OutputDir, o.RoleFileName()) - if err := ioutil.WriteFile(roleManifestFile, roleManifest, 0666); err != nil { - return fmt.Errorf("failed to write role manifest YAML file %v", err) - } - - roleBindingManifestFile := filepath.Join(o.OutputDir, o.RoleBindingFileName()) - if err := ioutil.WriteFile(roleBindingManifestFile, roleBindingManifest, 0666); err != nil { - return fmt.Errorf("failed to write role manifest YAML file %v", err) - } - return nil -} - -func getClusterRoleManifest(rules []rbacv1.PolicyRule, o *ManifestOptions) ([]byte, error) { - role := rbacv1.ClusterRole{ - TypeMeta: metav1.TypeMeta{ - Kind: "ClusterRole", - APIVersion: "rbac.authorization.k8s.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.RoleName(), - Labels: o.Labels, - }, - Rules: rules, - } - return yaml.Marshal(role) -} - -func getClusterRoleBindingManifest(o *ManifestOptions) ([]byte, error) { - rolebinding := &rbacv1.ClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "rbac.authorization.k8s.io/v1", - Kind: "ClusterRoleBinding", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.RoleBindingName(), - Labels: o.Labels, - }, - Subjects: []rbacv1.Subject{ - { - Name: o.ServiceAccount, - Namespace: o.Namespace, - Kind: "ServiceAccount", - }, - }, - RoleRef: rbacv1.RoleRef{ - Name: o.RoleName(), - Kind: "ClusterRole", - APIGroup: "rbac.authorization.k8s.io", - }, - } - return yaml.Marshal(rolebinding) -} diff --git a/pkg/util/util.go b/pkg/util/util.go deleted file mode 100644 index 9649913b3..000000000 --- a/pkg/util/util.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package util
-
-import (
-	"fmt"
-	"io"
-	"log"
-	"os"
-	"path/filepath"
-
-	"github.com/spf13/afero"
-)
-
-// FileWriter is a io wrapper to write files
-type FileWriter struct {
-	Fs afero.Fs
-}
-
-// WriteCloser returns a WriteCloser to write to given path
-func (fw *FileWriter) WriteCloser(path string) (io.Writer, error) {
-	if fw.Fs == nil {
-		fw.Fs = afero.NewOsFs()
-	}
-	dir := filepath.Dir(path)
-	err := fw.Fs.MkdirAll(dir, 0700)
-	if err != nil {
-		return nil, err
-	}
-
-	fi, err := fw.Fs.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
-	if err != nil {
-		return nil, err
-	}
-
-	return fi, nil
-}
-
-// WriteFile write given content to the file path
-func (fw *FileWriter) WriteFile(filePath string, content []byte) error {
-	if fw.Fs == nil {
-		fw.Fs = afero.NewOsFs()
-	}
-	f, err := fw.WriteCloser(filePath)
-	if err != nil {
-		return fmt.Errorf("failed to create %s: %v", filePath, err)
-	}
-
-	if c, ok := f.(io.Closer); ok {
-		defer func() {
-			if err := c.Close(); err != nil {
-				log.Fatal(err)
-			}
-		}()
-	}
-
-	_, err = f.Write(content)
-	if err != nil {
-		return fmt.Errorf("failed to write %s: %v", filePath, err)
-	}
-
-	return nil
-}

From 778eaeacebaa1450585e858482fb730216c80159 Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Fri, 26 Apr 2019 21:56:10 -0700
Subject: [PATCH 05/14] Generator framework

This adds a generator framework that ties together running multiple
generators from the same marker data, and describes how they can read
and write artifacts.
---
 pkg/genall/doc.go    |  52 ++++++
 pkg/genall/genall.go | 161 +++++++++++++++++++++++++++++++++++++
 pkg/genall/input.go  |  37 ++++++++++
 pkg/genall/output.go | 140 +++++++++++++++++++++++++++++++++
 4 files changed, 390 insertions(+)
 create mode 100644 pkg/genall/doc.go
 create mode 100644 pkg/genall/genall.go
 create mode 100644 pkg/genall/input.go
 create mode 100644 pkg/genall/output.go

diff --git a/pkg/genall/doc.go b/pkg/genall/doc.go
new file mode 100644
index 000000000..936b3de3c
--- /dev/null
+++ b/pkg/genall/doc.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package genall defines entrypoints for generation tools to hook into and
+// share the same set of parsing, typechecking, and marker information.
+//
+// Generators
+//
+// Each Generator knows how to register its markers into a central Registry,
+// and then how to generate output using a Collector and some root packages.
+// Each generator can be considered to be the output type of a marker, for easy
+// command line parsing.
+//
+// Output and Input
+//
+// Generators output artifacts via an OutputRule. OutputRules know how to
+// write output for different package-associated (code) files, as well as
+// config files. Each OutputRule should also be considered to be the output
+// type of a marker, for easy command-line parsing.
+//
+// OutputRules groups together an OutputRule per generator, plus a default
+// output rule for any not explicitly specified.
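+//
+// As a rough illustration (a sketch only; crdGen here stands in for some
+// concrete Generator and is not defined in this package), sending one
+// generator's output to a directory while discarding everything else might
+// look like:
+//
+//	rules := genall.OutputRules{
+//		Default: genall.OutputToNothing,
+//		ByGenerator: map[genall.Generator]genall.OutputRule{
+//			crdGen: genall.OutputToDirectory("config/crd"),
+//		},
+//	}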
+
+// OutputRules are defined for stdout, file writing, and sending to /dev/null
+// (useful for doing "type-checking" without actually saving the results).
+//
+// InputRule defines custom input loading, but it's shared across all
+// Generators. There's currently only a filesystem implementation.
+//
+// Runtime and Context
+//
+// Runtime maps together Generators and constructs "contexts" which provide
+// the common collector and roots, plus the output rule for that generator, and
+// a handle for reading files (like boilerplate headers).
+//
+// It will run all associated generators, printing errors and automatically
+// skipping type-checking errors (since those are commonly caused by the
+// partial type-checking of loader.TypeChecker).
+package genall

diff --git a/pkg/genall/genall.go b/pkg/genall/genall.go
new file mode 100644
index 000000000..b818cb770
--- /dev/null
+++ b/pkg/genall/genall.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genall
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"golang.org/x/tools/go/packages"
+	"sigs.k8s.io/yaml"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// Generators are a list of Generators.
+type Generators []Generator
+
+// RegisterMarkers registers all markers defined by each of the Generators in
+// this list into the given registry.
+func (g Generators) RegisterMarkers(reg *markers.Registry) error {
+	for _, gen := range g {
+		if err := gen.RegisterMarkers(reg); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Generator knows how to register some set of markers, and then produce
+// output artifacts based on loaded code containing those markers,
+// sharing common loaded data.
+type Generator interface {
+	// RegisterMarkers registers all markers needed by this Generator
+	// into the given registry.
+	RegisterMarkers(into *markers.Registry) error
+	// Generate generates artifacts produced by this marker.
+	// It's called *after* RegisterMarkers has been called.
+	Generate(*GenerationContext) error
+}
+
+// Runtime collects generators, loaded program data (Collector, root Packages),
+// and I/O rules, running them together.
+type Runtime struct {
+	// Generators are the Generators to be run by this Runtime.
+	Generators Generators
+	// GenerationContext is the base generation context that's copied
+	// to produce the context for each Generator.
+	GenerationContext
+	// OutputRules defines how to output artifacts for each Generator.
+	OutputRules OutputRules
+}
+
+// GenerationContext defines the common information needed for each Generator
+// to run.
+type GenerationContext struct {
+	// Collector is the shared marker collector.
+	Collector *markers.Collector
+	// Roots are the base packages to be processed.
+	Roots []*loader.Package
+	// Checker is the shared partial type-checker.
+	Checker *loader.TypeChecker
+	// OutputRule describes how to output artifacts.
+	OutputRule
+	// InputRule describes how to load associated boilerplate artifacts.
+	// It should *not* be used to load source files.
+	InputRule
+}
+
+// WriteYAML writes the given object out, serialized as YAML, using the
+// context's OutputRule.
+func (g GenerationContext) WriteYAML(obj interface{}, itemPath string) error {
+	yamlContent, err := yaml.Marshal(obj)
+	if err != nil {
+		return err
+	}
+
+	out, err := g.Open(nil, itemPath)
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+
+	n, err := out.Write(yamlContent)
+	if err != nil {
+		return err
+	}
+	if n < len(yamlContent) {
+		return io.ErrShortWrite
+	}
+	return nil
+}
+
+// ReadFile reads the given boilerplate artifact using the context's InputRule.
+func (g GenerationContext) ReadFile(path string) ([]byte, error) {
+	file, err := g.OpenForRead(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return ioutil.ReadAll(file)
+}
+
+// ForRoots produces a Runtime to run the given generators against the
+// given packages. It outputs to /dev/null by default.
+func (g Generators) ForRoots(rootPaths ...string) (*Runtime, error) {
+	roots, err := loader.LoadRoots(rootPaths...)
+	if err != nil {
+		return nil, err
+	}
+	rt := &Runtime{
+		Generators: g,
+		GenerationContext: GenerationContext{
+			Collector: &markers.Collector{
+				Registry: &markers.Registry{},
+			},
+			Roots:     roots,
+			InputRule: InputFromFileSystem,
+			Checker:   &loader.TypeChecker{},
+		},
+		OutputRules: OutputRules{Default: OutputToNothing},
+	}
+	if err := rt.Generators.RegisterMarkers(rt.Collector.Registry); err != nil {
+		return nil, err
+	}
+	return rt, nil
+}
+
+// Run runs the Generators in this Runtime against its packages, printing
+// errors (except type errors, which commonly result from using TypeChecker
+// with filters), returning true if errors were found.
+func (r *Runtime) Run() bool {
+	// TODO(directxman12): we could make this parallel,
+	// but we'd need to ensure all underlying machinery is threadsafe
+	for _, gen := range r.Generators {
+		ctx := r.GenerationContext // make a shallow copy
+		ctx.OutputRule = r.OutputRules.ForGenerator(gen)
+		if err := gen.Generate(&ctx); err != nil {
+			fmt.Fprintln(os.Stderr, err)
+		}
+	}
+
+	// skip TypeErrors -- they're probably just from partial typechecking in crd-gen
+	return loader.PrintErrors(r.Roots, packages.TypeError)
+}

diff --git a/pkg/genall/input.go b/pkg/genall/input.go
new file mode 100644
index 000000000..46e191c0c
--- /dev/null
+++ b/pkg/genall/input.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genall
+
+import (
+	"io"
+	"os"
+)
+
+// InputRule describes how to load non-code boilerplate artifacts.
+// It's not used for loading code.
+type InputRule interface {
+	// OpenForRead opens the given non-code artifact for reading.
+	OpenForRead(path string) (io.ReadCloser, error)
+}
+type inputFromFileSystem struct{}
+
+func (inputFromFileSystem) OpenForRead(path string) (io.ReadCloser, error) {
+	return os.Open(path)
+}
+
+// InputFromFileSystem reads from the filesystem as normal.
+var InputFromFileSystem = inputFromFileSystem{}

diff --git a/pkg/genall/output.go b/pkg/genall/output.go
new file mode 100644
index 000000000..025b8e5a8
--- /dev/null
+++ b/pkg/genall/output.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genall
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// nopCloser is a WriteCloser whose Close
+// is a no-op.
+type nopCloser struct {
+	io.Writer
+}
+
+func (n nopCloser) Close() error {
+	return nil
+}
+
+// DirectoryPerGenerator produces output rules mapping output to a different subdirectory
+// of the given base directory for each generator (with each subdirectory specified as
+// the key in the input map).
+func DirectoryPerGenerator(base string, generators map[string]Generator) OutputRules {
+	rules := OutputRules{
+		Default:     OutputArtifacts{Config: OutputToDirectory(base)},
+		ByGenerator: make(map[Generator]OutputRule, len(generators)),
+	}
+
+	for name, gen := range generators {
+		rules.ByGenerator[gen] = OutputArtifacts{
+			Config: OutputToDirectory(filepath.Join(base, name)),
+		}
+	}
+
+	return rules
+}
+
+// OutputRules defines how to output artifacts on a per-generator basis.
+type OutputRules struct {
+	// Default is the output rule used when no specific per-generator overrides match.
+	Default OutputRule
+	// ByGenerator contains specific per-generator overrides.
+	ByGenerator map[Generator]OutputRule
+}
+
+// ForGenerator returns the output rule that should be used
+// by the given Generator.
+func (o OutputRules) ForGenerator(gen Generator) OutputRule {
+	if forGen, specific := o.ByGenerator[gen]; specific {
+		return forGen
+	}
+	return o.Default
+}
+
+// OutputRule defines how to output artifacts from a generator.
+type OutputRule interface {
+	// Open opens the given artifact path for writing. If a package is passed,
+	// the artifact is considered to be used as part of the package (e.g.
+	// generated code), while a nil package indicates that the artifact is
+	// config (or something else not involved in Go compilation).
+	Open(pkg *loader.Package, path string) (io.WriteCloser, error)
+}
+
+// OutputToNothing skips outputting anything.
+var OutputToNothing = outputToNothing{}
+
+type outputToNothing struct{}
+
+func (o outputToNothing) Open(_ *loader.Package, _ string) (io.WriteCloser, error) {
+	return nopCloser{ioutil.Discard}, nil
+}
+
+// OutputToDirectory outputs each artifact to the given directory, regardless
+// of whether it's package-associated or not.
+type OutputToDirectory string
+
+func (o OutputToDirectory) Open(_ *loader.Package, itemPath string) (io.WriteCloser, error) {
+	// ensure the directory exists
+	if err := os.MkdirAll(string(o), os.ModePerm); err != nil {
+		return nil, err
+	}
+	path := filepath.Join(string(o), itemPath)
+	return os.Create(path)
+}
+
+// OutputToStdout outputs everything to standard-out, with no separation.
+// Generally useful for single-artifact outputs.
+var OutputToStdout = outputToStdout{}
+
+type outputToStdout struct{}
+
+func (o outputToStdout) Open(_ *loader.Package, itemPath string) (io.WriteCloser, error) {
+	return nopCloser{os.Stdout}, nil
+}
+
+// OutputArtifacts outputs artifacts to different locations, depending on
+// whether they're package-associated or not. Non-package associated artifacts
+// are output to the Config directory, while package-associated ones are output
+// to their package's source files' directory, unless an alternate path is
+// specified in Code.
+type OutputArtifacts struct {
+	Config OutputToDirectory
+	Code   OutputToDirectory `marker:",optional"`
+}
+
+func (o OutputArtifacts) Open(pkg *loader.Package, itemPath string) (io.WriteCloser, error) {
+	if pkg == nil {
+		return o.Config.Open(pkg, itemPath)
+	}
+
+	if o.Code != "" {
+		return o.Code.Open(pkg, itemPath)
+	}
+
+	if len(pkg.CompiledGoFiles) == 0 {
+		return nil, fmt.Errorf("cannot output to a package with no path on disk")
+	}
+	outDir := filepath.Dir(pkg.CompiledGoFiles[0])
+	outPath := filepath.Join(outDir, itemPath)
+	return os.Create(outPath)
+}

From 6ec0406907cacf4c0e9dc1c477ea7fd87f28273b Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Fri, 26 Apr 2019 21:57:23 -0700
Subject: [PATCH 06/14] RBAC Generation

This adds RBAC generation (as per our existing RBAC markers) on top of
the new generator framework.
---
 pkg/rbac/parser.go      | 119 ++++++++++++++++----------------
 pkg/rbac/parser_test.go | 124 ----------------------------------
 2 files changed, 69 insertions(+), 174 deletions(-)
 delete mode 100644 pkg/rbac/parser_test.go

diff --git a/pkg/rbac/parser.go b/pkg/rbac/parser.go
index d28cfd9e6..d0560cefd 100644
--- a/pkg/rbac/parser.go
+++ b/pkg/rbac/parser.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2019 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -15,69 +15,88 @@ limitations under the License.
 */
 
 // Package rbac contain libraries for generating RBAC manifests from RBAC
-// annotations in Go source files.
+// markers in Go source files.
+//
+// The markers take the form:
+//
+//	+kubebuilder:rbac:groups=<groups>,resources=<resources>,verbs=<verbs>,urls=<non resource urls>
 package rbac
 
 import (
-	"log"
-	"strings"
-
 	rbacv1 "k8s.io/api/rbac/v1"
-	"sigs.k8s.io/controller-tools/pkg/internal/general"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"sigs.k8s.io/controller-tools/pkg/genall"
+	"sigs.k8s.io/controller-tools/pkg/markers"
 )
 
-type parserOptions struct {
-	rules []rbacv1.PolicyRule
+var (
+	// RuleDefinition is a marker for defining RBAC rules.
+	// Call ToRule on the value to get a Kubernetes RBAC policy rule.
+	RuleDefinition = markers.Must(markers.MakeDefinition("kubebuilder:rbac", markers.DescribesPackage, Rule{}))
+)
+
+// Rule is a marker value that describes a Kubernetes RBAC rule.
+type Rule struct {
+	Groups    []string `marker:",optional"`
+	Resources []string `marker:",optional"`
+	Verbs     []string
+	URLs      []string `marker:"urls,optional"`
+}
 
-// parseAnnotation parses RBAC annotations
-func (o *parserOptions) parseAnnotation(commentText string) error {
-	for _, comment := range strings.Split(commentText, "\n") {
-		comment := strings.TrimSpace(comment)
-		if strings.HasPrefix(comment, "+rbac") {
-			if ann := general.GetAnnotation(comment, "rbac"); ann != "" {
-				o.rules = append(o.rules, parseRBACTag(ann))
-			}
-		}
-		if strings.HasPrefix(comment, "+kubebuilder:rbac") {
-			if ann := general.GetAnnotation(comment, "kubebuilder:rbac"); ann != "" {
-				o.rules = append(o.rules, parseRBACTag(ann))
-			}
-		}
-	}
-	return nil
+// ToRule converts this rule to its Kubernetes API form.
+func (r Rule) ToRule() rbacv1.PolicyRule {
+	// fix the group names first, since letting people type "core" is nice
+	for i, group := range r.Groups {
+		if group == "core" {
+			r.Groups[i] = ""
+		}
+	}
+	return rbacv1.PolicyRule{
+		APIGroups:       r.Groups,
+		Verbs:           r.Verbs,
+		Resources:       r.Resources,
+		NonResourceURLs: r.URLs,
+	}
+}
+
+// Generator is a genall.Generator that generates RBAC manifests.
+type Generator struct {
+	RoleName string
+}
 
-// parseRBACTag parses the given RBAC annotation in to an RBAC PolicyRule.
-// This is copied from Kubebuilder code.
-func parseRBACTag(tag string) rbacv1.PolicyRule {
-	result := rbacv1.PolicyRule{}
-	for _, elem := range strings.Split(tag, ",") {
-		key, value, err := general.ParseKV(elem)
+func (Generator) RegisterMarkers(into *markers.Registry) error {
+	return into.Register(RuleDefinition)
+}
+
+func (g Generator) Generate(ctx *genall.GenerationContext) error {
+	var rules []rbacv1.PolicyRule
+	for _, root := range ctx.Roots {
+		markerSet, err := markers.PackageMarkers(ctx.Collector, root)
 		if err != nil {
-			log.Fatalf("// +kubebuilder:rbac: tags must be key value pairs. Expected "+
-				"keys [groups=,resources=,verbs=] "+
-				"Got string: [%s]", tag)
+			root.AddError(err)
 		}
-		values := strings.Split(value, ";")
-		switch key {
-		case "groups":
-			normalized := []string{}
-			for _, v := range values {
-				if v == "core" {
-					normalized = append(normalized, "")
-				} else {
-					normalized = append(normalized, v)
-				}
-			}
-			result.APIGroups = normalized
-		case "resources":
-			result.Resources = values
-		case "verbs":
-			result.Verbs = values
-		case "urls":
-			result.NonResourceURLs = values
+
+		for _, rule := range markerSet[RuleDefinition.Name] {
+			rules = append(rules, rule.(Rule).ToRule())
 		}
 	}
-	return result
+
+	if len(rules) == 0 {
+		return nil
+	}
+
+	if err := ctx.WriteYAML(rbacv1.ClusterRole{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ClusterRole",
+			APIVersion: rbacv1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: g.RoleName,
+		},
+		Rules: rules,
+	}, "role.yaml"); err != nil {
+		return err
+	}
+
+	return nil
 }

diff --git a/pkg/rbac/parser_test.go b/pkg/rbac/parser_test.go
deleted file mode 100644
index 30066770b..000000000
--- a/pkg/rbac/parser_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rbac
-
-import (
-	"go/token"
-	"reflect"
-	"testing"
-
-	rbacv1 "k8s.io/api/rbac/v1"
-	"sigs.k8s.io/controller-tools/pkg/internal/general"
-)
-
-func TestParseFile(t *testing.T) {
-	tests := []struct {
-		content string
-		exp     []rbacv1.PolicyRule
-	}{
-		{
-			content: `package foo
-	import (
-		"fmt"
-		"time"
-	)
-
-	// RBAC annotation with kubebuilder prefix
-	// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;delete
-	// bar function
-	func bar() {
-		fmt.Println(time.Now())
-	}`,
-			exp: []rbacv1.PolicyRule{{
-				Verbs:     []string{"get", "list", "watch", "delete"},
-				APIGroups: []string{"apps"},
-				Resources: []string{"deployments"},
-			}},
-		},
-		{
-			content: `package foo
-	import (
-		"fmt"
-		"time"
-	)
-
-	// RBAC annotation without kuebuilder prefix
-	// +rbac:groups=apps,resources=deployments,verbs=get;list;watch;delete
-	// bar function
-	func bar() {
-		fmt.Println(time.Now())
-	}`,
-			exp: []rbacv1.PolicyRule{{
-				Verbs:     []string{"get", "list", "watch", "delete"},
-				APIGroups: []string{"apps"},
-				Resources: []string{"deployments"},
-			}},
-		},
-		{
-			content: `package foo
-	import (
-		"fmt"
-		"time"
-	)
-
-	// RBAC annotation with kubebuilder prefix
-	// +kubebuilder:rbac:groups=,resources=pods,verbs=get;list;watch;delete
-	// bar function
-	func bar() {
-		fmt.Println(time.Now())
-	}`,
-			exp: []rbacv1.PolicyRule{{
-				Verbs:     []string{"get", "list", "watch", "delete"},
-				APIGroups: []string{""},
-				Resources: []string{"pods"},
-			}},
-		},
-		{
-			content: `package foo
-	import (
-		"fmt"
-		"time"
-	)
-
-	// RBAC annotation with kubebuilder prefix
-	// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;delete
-	// bar function
-	func bar() {
-		fmt.Println(time.Now())
-	}`,
-			exp: []rbacv1.PolicyRule{{
-				Verbs:     []string{"get", "list", "watch", "delete"},
-				APIGroups: []string{""},
-				Resources: []string{"pods"},
-			}},
-		},
-	}
-
-	for _, test := range tests {
-		fset := token.NewFileSet()
-		ops := parserOptions{
-			rules: []rbacv1.PolicyRule{},
-		}
-		err := general.ParseFile(fset, "test.go", test.content, ops.parseAnnotation)
-		if err != nil {
-			t.Errorf("processFile should have succeeded, but got error: %v", err)
-		}
-		if !reflect.DeepEqual(ops.rules, test.exp) {
-			t.Errorf("RBAC rules should have matched, expected %v and got %v", test.exp, ops.rules)
-		}
-	}
-}

From c264aec75441428c4877e1d8a3507e74aa855b7c Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Fri, 26 Apr 2019 22:59:20 -0700
Subject: [PATCH 07/14] CRD generation

This defines the framework for CRD generation, and uses markers that
implement interfaces to modify the schema and CRD.
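
As a hedged sketch of the intended flow (variable names here are
illustrative; the calls mirror pkg/crd/gen.go below): a consumer builds a
Parser, loads the root packages, and then asks for CRDs by group-kind:

    parser := &crd.Parser{Collector: collector, Checker: checker}
    crd.AddKnownTypes(parser)
    for _, root := range roots {
        parser.NeedPackage(root)
    }
    parser.NeedCRDFor(groupKind)
    crdObj := parser.CustomResourceDefinitions[groupKind]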
---
 pkg/crd/doc.go                |  63 ++++++
 pkg/crd/flatten.go            | 263 +++++++++++++++++++++++
 pkg/crd/gen.go                | 139 ++++++++++++
 pkg/crd/known_types.go        |  75 +++++++
 pkg/crd/markers/crd.go        | 176 +++++++++++++++
 pkg/crd/markers/doc.go        |  46 ++++
 pkg/crd/markers/package.go    |  28 +++
 pkg/crd/markers/register.go   |  53 +++++
 pkg/crd/markers/validation.go | 209 ++++++++++++++++++
 pkg/crd/parser.go             | 208 ++++++++++++++++++
 pkg/crd/schema.go             | 393 ++++++++++++++++++++++++++++++++++
 pkg/crd/schema_visitor.go     | 104 +++++++++
 pkg/crd/spec.go               | 141 ++++++++++++
 13 files changed, 1898 insertions(+)
 create mode 100644 pkg/crd/doc.go
 create mode 100644 pkg/crd/flatten.go
 create mode 100644 pkg/crd/gen.go
 create mode 100644 pkg/crd/known_types.go
 create mode 100644 pkg/crd/markers/crd.go
 create mode 100644 pkg/crd/markers/doc.go
 create mode 100644 pkg/crd/markers/package.go
 create mode 100644 pkg/crd/markers/register.go
 create mode 100644 pkg/crd/markers/validation.go
 create mode 100644 pkg/crd/parser.go
 create mode 100644 pkg/crd/schema.go
 create mode 100644 pkg/crd/schema_visitor.go
 create mode 100644 pkg/crd/spec.go

diff --git a/pkg/crd/doc.go b/pkg/crd/doc.go
new file mode 100644
index 000000000..fd48e44ae
--- /dev/null
+++ b/pkg/crd/doc.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package crd contains utilities for generating CustomResourceDefinitions and
+// their corresponding OpenAPI validation schemata.
+//
+// Markers
+//
+// Markers live under the markers subpackage. Two types of markers exist:
+// those that modify schema generation (for validation), and those that modify
+// the rest of the CRD. See the subpackage for more information and all
+// supported markers.
+//
+// Collecting Types and Generating CRDs
+//
+// The Parser is the entrypoint for collecting the information required to
+// generate CRDs. Like loader and collector, its methods are idempotent, not
+// doing extra work if called multiple times.
+//
+// Parser's methods start with Need. Calling NeedXYZ indicates that XYZ should
+// be made present in the equivalent field in the Parser, where it can then be
+// loaded from. Each Need method will in turn call Need on anything it needs.
+//
+// In general, root packages should first be loaded into the Parser with
+// NeedPackage. Then, CRDs can be generated with NeedCRDFor.
+//
+// Errors are generally attached directly to the relevant Package with
+// AddError.
+//
+// Known Packages
+//
+// There are a few types from Kubernetes that have special meaning, but don't
+// have validation markers attached. Those specific types have overrides
+// listed in KnownPackages that can be added as overrides to any parser.
+//
+// Flattening
+//
+// Once schemata are generated, they can be used directly by external tooling
+// (like JSONSchema validators), but must first be "flattened" to not contain
+// references before use in a CRD (Kubernetes doesn't allow references in the
+// CRD's validation schema).
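+//
+// For example (a hedged sketch of the internal reference form), a schema
+// node whose Ref is
+//
+//  "#/definitions/k8s.io~1apimachinery~1pkg~1apis~1meta~1v1~0Time"
+//
+// is replaced inline by the schema it points to before the CRD is written.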
+//
+// The Flattener built in to the Parser takes care of flattening out references
+// when requesting the CRDs, but can be invoked manually. It will not modify
+// the input schemata.
+//
+// Flattened schemata may further be passed to FlattenEmbedded to remove the
+// use of AllOf (which is used to describe embedded struct fields when
+// references are in use). This is done automatically when fetching CRDs.
+package crd
diff --git a/pkg/crd/flatten.go b/pkg/crd/flatten.go
new file mode 100644
index 000000000..91ca3b0c9
--- /dev/null
+++ b/pkg/crd/flatten.go
@@ -0,0 +1,263 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crd
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// flattenAllOfInto copies properties from src to dst, then copies the properties
+// of each item in src's allOf to dst's properties as well.
+func flattenAllOfInto(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps) {
+	if len(src.AllOf) > 0 {
+		for _, embedded := range src.AllOf {
+			flattenAllOfInto(dst, embedded)
+		}
+	}
+
+	for propName, prop := range src.Properties {
+		dst.Properties[propName] = prop
+	}
+	for _, propName := range src.Required {
+		dst.Required = append(dst.Required, propName)
+	}
+}
+
+// allOfVisitor recursively visits allOf fields in the schema,
+// merging nested allOf properties into the containing schema.
+type allOfVisitor struct{}
+
+func (v allOfVisitor) Visit(schema *apiext.JSONSchemaProps) SchemaVisitor {
+	if schema == nil {
+		return v
+	}
+	var outAllOf []apiext.JSONSchemaProps
+	for _, embedded := range schema.AllOf {
+		if embedded.Ref != nil && len(*embedded.Ref) > 0 {
+			outAllOf = append(outAllOf, embedded)
+			continue
+		}
+		// NB(directxman12): only certain schemata are possible here
+		// (only a normal struct can be embedded), so we only need
+		// to deal with properties, allof, and required
+		if len(embedded.Properties) == 0 && len(embedded.AllOf) == 0 {
+			outAllOf = append(outAllOf, embedded)
+			continue
+		}
+		flattenAllOfInto(schema, embedded)
+	}
+	schema.AllOf = outAllOf
+	return v
+}
+
+// NB(directxman12): FlattenEmbedded is separate from Flattener because
+// some tooling wants to flatten out embedded fields, but only actually
+// flatten a few specific types first.
+
+// FlattenEmbedded flattens embedded fields (represented via AllOf) which have
+// already had their references resolved into simple properties in the containing
+// schema.
+func FlattenEmbedded(schema *apiext.JSONSchemaProps) *apiext.JSONSchemaProps {
+	outSchema := schema.DeepCopy()
+	EditSchema(outSchema, allOfVisitor{})
+	return outSchema
+}
+
+// Flattener knows how to take a root type, and flatten all references in it
+// into a single, flat type. Flattened types are cached, so it's relatively
+// cheap to make repeated calls with the same type.
+type Flattener struct {
+	// Parser is used to lookup package and type details, and parse in new packages.
+	Parser *Parser
+
+	// flattenedTypes holds the flattened version of each seen type for later reuse.
+	flattenedTypes map[TypeIdent]apiext.JSONSchemaProps
+	initOnce       sync.Once
+}
+
+func (f *Flattener) init() {
+	f.initOnce.Do(func() {
+		f.flattenedTypes = make(map[TypeIdent]apiext.JSONSchemaProps)
+	})
+}
+
+// cacheType saves the flattened version of the given type for later reuse
+func (f *Flattener) cacheType(typ TypeIdent, schema apiext.JSONSchemaProps) {
+	f.init()
+	f.flattenedTypes[typ] = schema
+}
+
+// loadUnflattenedSchema fetches a fresh, unflattened schema from the parser.
+func (f *Flattener) loadUnflattenedSchema(typ TypeIdent) (*apiext.JSONSchemaProps, error) {
+	f.Parser.NeedSchemaFor(typ)
+
+	baseSchema, found := f.Parser.Schemata[typ]
+	if !found {
+		return nil, fmt.Errorf("unable to locate schema for type %s", typ)
+	}
+	return &baseSchema, nil
+}
+
+// FlattenType flattens the given pre-loaded type, removing any references from it.
+// It deep-copies the schema first, so it won't affect the parser's version of the schema.
+func (f *Flattener) FlattenType(typ TypeIdent) *apiext.JSONSchemaProps {
+	f.init()
+	if cachedSchema, isCached := f.flattenedTypes[typ]; isCached {
+		return &cachedSchema
+	}
+	baseSchema, err := f.loadUnflattenedSchema(typ)
+	if err != nil {
+		typ.Package.AddError(err)
+		return nil
+	}
+	resSchema := f.FlattenSchema(*baseSchema, typ.Package)
+	f.cacheType(typ, *resSchema)
+	return resSchema
+}
+
+// FlattenSchema flattens the given schema, removing any references.
+// It deep-copies the schema first, so the input schema won't be affected.
+func (f *Flattener) FlattenSchema(baseSchema apiext.JSONSchemaProps, currentPackage *loader.Package) *apiext.JSONSchemaProps {
+	resSchema := baseSchema.DeepCopy()
+	EditSchema(resSchema, &flattenVisitor{
+		Flattener:      f,
+		currentPackage: currentPackage,
+	})
+
+	return resSchema
+}
+
+// identFromRef converts the given schema ref from the given package back
+// into the TypeIdent that it represents.
+func identFromRef(ref string, contextPkg *loader.Package) (TypeIdent, error) {
+	if !strings.HasPrefix(ref, defPrefix) {
+		return TypeIdent{}, fmt.Errorf("non-standard reference link %q", ref)
+	}
+	ref = ref[len(defPrefix):]
+	// decode the json pointer encodings
+	ref = strings.Replace(ref, "~1", "/", -1)
+	ref = strings.Replace(ref, "~0", "~", -1)
+	nameParts := strings.SplitN(ref, "~", 2)
+	if len(nameParts) == 1 {
+		// a local reference
+		return TypeIdent{
+			Name:    nameParts[0],
+			Package: contextPkg,
+		}, nil
+	}
+
+	// an external reference
+	return TypeIdent{
+		Name:    nameParts[1],
+		Package: contextPkg.Imports()[nameParts[0]],
+	}, nil
+}
+
+// preserveFields copies documentation fields from src into dst, preserving
+// field-level documentation when flattening.
+func preserveFields(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps) {
+	// TODO(directxman12): preserve both description fields?
+	dst.Description = src.Description
+	dst.Title = src.Title
+	dst.Example = src.Example
+	if src.ExternalDocs != nil {
+		dst.ExternalDocs = src.ExternalDocs
+	}
+
+	// TODO(directxman12): copy over other fields?
+}
+
+// flattenVisitor visits each node in the schema, recursively flattening references.
+type flattenVisitor struct { + *Flattener + + currentPackage *loader.Package + currentType *TypeIdent + currentSchema *apiext.JSONSchemaProps +} + +func (f *flattenVisitor) Visit(baseSchema *apiext.JSONSchemaProps) SchemaVisitor { + if baseSchema == nil { + // end-of-node marker, cache the results + if f.currentType != nil { + f.cacheType(*f.currentType, *f.currentSchema) + } + return f + } + + // if we get a type that's just a ref, resolve it + if baseSchema.Ref != nil && len(*baseSchema.Ref) > 0 { + // resolve this ref + refIdent, err := identFromRef(*baseSchema.Ref, f.currentPackage) + if err != nil { + f.currentPackage.AddError(err) + return nil + } + + // load and potentially flatten the schema + + // check the cache first... + if refSchemaCached, isCached := f.flattenedTypes[refIdent]; isCached { + // shallow copy is fine, it's just to avoid overwriting the doc fields + preserveFields(&refSchemaCached, *baseSchema) + *baseSchema = refSchemaCached + return nil // don't recurse, we're done + } + + // ...otherwise, we need to flatten + refSchema, err := f.loadUnflattenedSchema(refIdent) + if err != nil { + f.currentPackage.AddError(err) + return nil + } + refSchema = refSchema.DeepCopy() + preserveFields(refSchema, *baseSchema) + *baseSchema = *refSchema + + // avoid loops (which shouldn't exist, but just in case) + // by marking a nil cached pointer before we start recursing + f.cacheType(refIdent, apiext.JSONSchemaProps{}) + + return &flattenVisitor{ + Flattener: f.Flattener, + + currentPackage: refIdent.Package, + currentType: &refIdent, + currentSchema: baseSchema, + } + } + + // otherwise, continue recursing... + if f.currentType != nil { + // ...but don't accidentally end this node early (for caching purposes) + return &flattenVisitor{ + Flattener: f.Flattener, + currentPackage: f.currentPackage, + } + } + + return f +} diff --git a/pkg/crd/gen.go b/pkg/crd/gen.go new file mode 100644 index 000000000..de8a5afeb --- /dev/null +++ b/pkg/crd/gen.go @@ -0,0 +1,139 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + "fmt" + "go/types" + + "k8s.io/apimachinery/pkg/runtime/schema" + + crdmarkers "sigs.k8s.io/controller-tools/pkg/crd/markers" + "sigs.k8s.io/controller-tools/pkg/genall" + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// Generator is a genall.Generator that generates CRDs. 
+type Generator struct{}
+
+func (Generator) RegisterMarkers(into *markers.Registry) error {
+	return crdmarkers.Register(into)
+}
+func (Generator) Generate(ctx *genall.GenerationContext) error {
+	parser := &Parser{
+		Collector: ctx.Collector,
+		Checker:   ctx.Checker,
+	}
+
+	AddKnownTypes(parser)
+	for _, root := range ctx.Roots {
+		parser.NeedPackage(root)
+	}
+
+	metav1Pkg := findMetav1(ctx.Roots)
+	if metav1Pkg == nil {
+		// no objects in the roots, since nothing imported metav1
+		return nil
+	}
+
+	// TODO: allow selecting a specific object
+	kubeKinds := findKubeKinds(parser, metav1Pkg)
+	if len(kubeKinds) == 0 {
+		// no objects in the roots
+		return nil
+	}
+
+	for _, groupKind := range kubeKinds {
+		parser.NeedCRDFor(groupKind)
+		crd := parser.CustomResourceDefinitions[groupKind]
+		fileName := fmt.Sprintf("%s_%s.yaml", crd.Spec.Group, crd.Spec.Names.Plural)
+		if err := ctx.WriteYAML(crd, fileName); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// findMetav1 locates the actual package representing metav1 amongst
+// the imports of the roots.
+func findMetav1(roots []*loader.Package) *loader.Package {
+	for _, root := range roots {
+		pkg := root.Imports()["k8s.io/apimachinery/pkg/apis/meta/v1"]
+		if pkg != nil {
+			return pkg
+		}
+	}
+	return nil
+}
+
+// findKubeKinds locates all types that contain TypeMeta and ObjectMeta
+// (and thus may be a Kubernetes object), and returns the corresponding
+// group-kinds.
+func findKubeKinds(parser *Parser, metav1Pkg *loader.Package) []schema.GroupKind {
+	// TODO(directxman12): technically, we should be finding metav1 per-package
+	var kubeKinds []schema.GroupKind
+	for typeIdent, info := range parser.Types {
+		hasObjectMeta := false
+		hasTypeMeta := false
+
+		pkg := typeIdent.Package
+		pkg.NeedTypesInfo()
+		typesInfo := pkg.TypesInfo
+
+		for _, field := range info.Fields {
+			if field.Name != "" {
+				// type and object meta are embedded,
+				// so they can't be this
+				continue
+			}
+
+			fieldType := typesInfo.TypeOf(field.RawField.Type)
+			namedField, isNamed := fieldType.(*types.Named)
+			if !isNamed {
+				// ObjectMeta and TypeMeta are named types
+				continue
+			}
+			fieldPkgPath := loader.NonVendorPath(namedField.Obj().Pkg().Path())
+			fieldPkg := pkg.Imports()[fieldPkgPath]
+			if fieldPkg != metav1Pkg {
+				// not the actual metav1 package, so not TypeMeta/ObjectMeta
+				continue
+			}
+
+			switch namedField.Obj().Name() {
+			case "ObjectMeta":
+				hasObjectMeta = true
+			case "TypeMeta":
+				hasTypeMeta = true
+			}
+		}
+
+		if !hasObjectMeta || !hasTypeMeta {
+			continue
+		}
+
+		groupKind := schema.GroupKind{
+			Group: parser.GroupVersions[pkg].Group,
+			Kind:  typeIdent.Name,
+		}
+		kubeKinds = append(kubeKinds, groupKind)
+	}
+
+	return kubeKinds
+}
diff --git a/pkg/crd/known_types.go b/pkg/crd/known_types.go
new file mode 100644
index 000000000..7630222b3
--- /dev/null
+++ b/pkg/crd/known_types.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package crd
+
+import (
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// KnownPackages overrides types in some common packages that have custom validation
+// but don't have validation markers on them (since they're from core Kubernetes).
+var KnownPackages = map[string]PackageOverride{
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1": func(p *Parser, pkg *loader.Package) {
+		p.Schemata[TypeIdent{Name: "Time", Package: pkg}] = apiext.JSONSchemaProps{
+			Type:   "string",
+			Format: "date-time",
+		}
+		p.Schemata[TypeIdent{Name: "Duration", Package: pkg}] = apiext.JSONSchemaProps{
+			// TODO(directxman12): regexp validation for this (or get kube to support it as a format value)
+			Type: "string",
+		}
+		p.AddPackage(pkg) // get the rest of the types
+	},
+
+	"k8s.io/apimachinery/pkg/api/resource": func(p *Parser, pkg *loader.Package) {
+		p.Schemata[TypeIdent{Name: "Quantity", Package: pkg}] = apiext.JSONSchemaProps{
+			// TODO(directxman12): regexp validation for this (or get kube to support it as a format value)
+			Type: "string",
+		}
+		// No point in calling AddPackage, this is the sole inhabitant
+	},
+
+	"k8s.io/apimachinery/pkg/runtime": func(p *Parser, pkg *loader.Package) {
+		p.Schemata[TypeIdent{Name: "RawExtension", Package: pkg}] = apiext.JSONSchemaProps{
+			// TODO(directxman12): regexp validation for this (or get kube to support it as a format value)
+			Type: "object",
+		}
+		p.AddPackage(pkg) // get the rest of the types
+	},
+
+	"k8s.io/apimachinery/pkg/util/intstr": func(p *Parser, pkg *loader.Package) {
+		p.Schemata[TypeIdent{Name: "IntOrString", Package: pkg}] = apiext.JSONSchemaProps{
+			AnyOf: []apiext.JSONSchemaProps{
+				{Type: "string"},
+				{Type: "integer"},
+			},
+		}
+		// No point in calling AddPackage, this is the sole inhabitant
+	},
+}
+
+// AddKnownTypes registers the package overrides in KnownPackages with the given parser.
+func AddKnownTypes(parser *Parser) {
+	// ensure everything is there before adding to PackageOverrides
+	// TODO(directxman12): this is a bit of a hack, maybe just use constructors?
+	parser.init()
+	for pkgName, override := range KnownPackages {
+		parser.PackageOverrides[pkgName] = override
+	}
+}
diff --git a/pkg/crd/markers/crd.go b/pkg/crd/markers/crd.go
new file mode 100644
index 000000000..bf3f270d5
--- /dev/null
+++ b/pkg/crd/markers/crd.go
@@ -0,0 +1,176 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"fmt"
+
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// CRDMarkers lists all markers that directly modify the CRD (not validation
+// schemas).
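+//
+// For example, a sketch of how a couple of these markers might appear on an
+// API type (the type, column name, and JSON path are illustrative only):
+//
+//  // +kubebuilder:subresource:status
+//  // +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas"
+//  type MyKind struct { ... }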
+var CRDMarkers = []*markers.Definition{
+	markers.Must(markers.MakeDefinition("kubebuilder:subresource:status", markers.DescribesType, SubresourceStatus{})),
+	markers.Must(markers.MakeDefinition("kubebuilder:subresource:scale", markers.DescribesType, SubresourceScale{})),
+	markers.Must(markers.MakeDefinition("kubebuilder:printcolumn", markers.DescribesType, PrintColumn{})),
+	markers.Must(markers.MakeDefinition("kubebuilder:resource", markers.DescribesType, Resource{})),
+}
+
+// TODO: categories and singular used to be annotation types
+// TODO: doc
+// TODO: nonNamespaced
+
+func init() {
+	AllDefinitions = append(AllDefinitions, CRDMarkers...)
+}
+
+// SubresourceStatus defines "+kubebuilder:subresource:status"
+type SubresourceStatus struct{}
+
+func (s SubresourceStatus) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error {
+	var subresources *apiext.CustomResourceSubresources
+	if version == "" {
+		// single-version
+		if crd.Subresources == nil {
+			crd.Subresources = &apiext.CustomResourceSubresources{}
+		}
+		subresources = crd.Subresources
+	} else {
+		// multi-version
+		for i := range crd.Versions {
+			ver := &crd.Versions[i]
+			if ver.Name != version {
+				continue
+			}
+			if ver.Subresources == nil {
+				ver.Subresources = &apiext.CustomResourceSubresources{}
+			}
+			subresources = ver.Subresources
+			break
+		}
+	}
+	if subresources == nil {
+		return fmt.Errorf("status subresource applied to version %q not in CRD", version)
+	}
+	subresources.Status = &apiext.CustomResourceSubresourceStatus{}
+	return nil
+}
+
+// SubresourceScale defines "+kubebuilder:subresource:scale"
+type SubresourceScale struct {
+	// marker names are leftover legacy cruft
+	SpecPath     string  `marker:"specpath"`
+	StatusPath   string  `marker:"statuspath"`
+	SelectorPath *string `marker:"selectorpath"`
+}
+
+func (s SubresourceScale) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error {
+	var subresources *apiext.CustomResourceSubresources
+	if version == "" {
+		// single-version
+		if crd.Subresources == nil {
+			crd.Subresources = &apiext.CustomResourceSubresources{}
+		}
+		subresources = crd.Subresources
+	} else {
+		// multi-version
+		for i := range crd.Versions {
+			ver := &crd.Versions[i]
+			if ver.Name != version {
+				continue
+			}
+			if ver.Subresources == nil {
+				ver.Subresources = &apiext.CustomResourceSubresources{}
+			}
+			subresources = ver.Subresources
+			break
+		}
+	}
+	if subresources == nil {
+		return fmt.Errorf("scale subresource applied to version %q not in CRD", version)
+	}
+	subresources.Scale = &apiext.CustomResourceSubresourceScale{
+		SpecReplicasPath:   s.SpecPath,
+		StatusReplicasPath: s.StatusPath,
+		LabelSelectorPath:  s.SelectorPath,
+	}
+	return nil
+}
+
+// PrintColumn defines "+kubebuilder:printcolumn"
+type PrintColumn struct {
+	Name        string
+	Type        string
+	JSONPath    string `marker:"JSONPath"` // legacy cruft
+	Description string
+	Format      string
+	Priority    int32
+}
+
+func (s PrintColumn) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error {
+	var columns *[]apiext.CustomResourceColumnDefinition
+	if version == "" {
+		columns = &crd.AdditionalPrinterColumns
+	} else {
+		for i := range crd.Versions {
+			ver := &crd.Versions[i]
+			if ver.Name != version {
+				continue
+			}
+			columns = &ver.AdditionalPrinterColumns
+			break
+		}
+	}
+	if columns == nil {
+		return fmt.Errorf("printer columns applied to version %q not in CRD", version)
+	}
+
+	*columns = 
append(*columns, apiext.CustomResourceColumnDefinition{
+		Name:        s.Name,
+		Type:        s.Type,
+		JSONPath:    s.JSONPath,
+		Description: s.Description,
+		Format:      s.Format,
+		Priority:    s.Priority,
+	})
+
+	return nil
+}
+
+// Resource defines "+kubebuilder:resource"
+type Resource struct {
+	Path       string
+	ShortName  []string `marker:",optional"`
+	Categories []string `marker:",optional"`
+	Singular   string   `marker:",optional"`
+}
+
+func (s Resource) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error {
+	crd.Names.Plural = s.Path
+	crd.Names.ShortNames = s.ShortName
+	crd.Names.Categories = s.Categories
+
+	return nil
+}
+
+// NB(directxman12): singular was historically distinct, so we keep it here for backwards compat
diff --git a/pkg/crd/markers/doc.go b/pkg/crd/markers/doc.go
new file mode 100644
index 000000000..bfe68967e
--- /dev/null
+++ b/pkg/crd/markers/doc.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package markers defines markers for generating schema validation
+// and CRD structure.
+//
+// All markers related to CRD generation live in AllDefinitions.
+//
+// Validation Markers
+//
+// Validation markers have values that implement ApplyToSchema
+// (crd.SchemaMarker). Any marker implementing this will automatically
+// be run after the rest of a given schema node has been generated.
+// Markers that need to be run before any other markers can also
+// implement ApplyFirst, but this is discouraged and may change
+// in the future.
+//
+// All validation markers start with "+kubebuilder:validation", and
+// have the same name as their type name.
+//
+// CRD Markers
+//
+// Markers that modify anything in the CRD itself *except* for the schema
+// implement ApplyToCRD (crd.CRDMarker). They are expected to detect whether
+// they should apply themselves to a specific version in the CRD (as passed to
+// them), or to the root-level CRD for legacy cases. They are applied *after*
+// the rest of the CRD is computed.
+//
+// Misc
+//
+// This package also defines the "+groupName" and "+versionName" package-level
+// markers, for defining package<->group-version mappings.
+package markers
diff --git a/pkg/crd/markers/package.go b/pkg/crd/markers/package.go
new file mode 100644
index 000000000..ea49988a6
--- /dev/null
+++ b/pkg/crd/markers/package.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+func init() {
+	AllDefinitions = append(AllDefinitions,
+		markers.Must(markers.MakeDefinition("groupName", markers.DescribesPackage, "")),
+		markers.Must(markers.MakeDefinition("versionName", markers.DescribesPackage, "")),
+	)
+}
diff --git a/pkg/crd/markers/register.go b/pkg/crd/markers/register.go
new file mode 100644
index 000000000..ccea7d53b
--- /dev/null
+++ b/pkg/crd/markers/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"reflect"
+
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// AllDefinitions contains all marker definitions for this package.
+var AllDefinitions []*markers.Definition
+
+// mustMakeAllWithPrefix converts each object into a marker definition, using
+// each object's type name together with the prefix to form the marker name.
+func mustMakeAllWithPrefix(prefix string, target markers.TargetType, objs ...interface{}) []*markers.Definition {
+	defs := make([]*markers.Definition, len(objs))
+	for i, obj := range objs {
+		name := prefix + ":" + reflect.TypeOf(obj).Name()
+		def, err := markers.MakeDefinition(name, target, obj)
+		if err != nil {
+			panic(err)
+		}
+		defs[i] = def
+	}
+
+	return defs
+}
+
+// Register registers all definitions for CRD generation to the given registry.
+func Register(reg *markers.Registry) error {
+	for _, def := range AllDefinitions {
+		if err := reg.Register(def); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/crd/markers/validation.go b/pkg/crd/markers/validation.go
new file mode 100644
index 000000000..7b3c1fe8c
--- /dev/null
+++ b/pkg/crd/markers/validation.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"fmt"
+
+	"encoding/json"
+
+	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// ValidationMarkers lists all available markers that affect CRD schema generation.
+// All markers start with `+kubebuilder:validation:`, and continue with their type name.
+// A copy of each marker that instead describes types is produced as well, for
+// making types reusable and for writing complex validations on slice items.
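+//
+// For example, a sketch of field-level usage (the field itself is
+// illustrative only):
+//
+//  // +kubebuilder:validation:MaxLength=15
+//  // +kubebuilder:validation:Pattern="^[a-z]+$"
+//  Name string `json:"name"`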
+var ValidationMarkers = mustMakeAllWithPrefix("kubebuilder:validation", markers.DescribesField,
+
+	// integer markers
+
+	Maximum(0),
+	Minimum(0),
+	ExclusiveMaximum(false),
+	ExclusiveMinimum(false),
+	MultipleOf(0),
+
+	// string markers
+
+	MaxLength(0),
+	MinLength(0),
+	Pattern(""),
+
+	// slice markers
+
+	MaxItems(0),
+	MinItems(0),
+	UniqueItems(false),
+
+	// general markers
+
+	Enum(nil),
+	Format(""),
+	Type(""),
+)
+
+func init() {
+	AllDefinitions = append(AllDefinitions, ValidationMarkers...)
+
+	for _, def := range ValidationMarkers {
+		typDef := *def
+		typDef.Target = markers.DescribesType
+		AllDefinitions = append(AllDefinitions, &typDef)
+	}
+}
+
+type Maximum int
+type Minimum int
+type ExclusiveMinimum bool
+type ExclusiveMaximum bool
+type MultipleOf int
+
+type MaxLength int
+type MinLength int
+type Pattern string
+
+type MaxItems int
+type MinItems int
+type UniqueItems bool
+
+type Enum []interface{}
+type Format string
+type Type string
+
+func (m Maximum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply maximum to an integer")
+	}
+	val := float64(m)
+	schema.Maximum = &val
+	return nil
+}
+func (m Minimum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply minimum to an integer")
+	}
+	val := float64(m)
+	schema.Minimum = &val
+	return nil
+}
+func (m ExclusiveMaximum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply exclusivemaximum to an integer")
+	}
+	schema.ExclusiveMaximum = bool(m)
+	return nil
+}
+func (m ExclusiveMinimum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply exclusiveminimum to an integer")
+	}
+	schema.ExclusiveMinimum = bool(m)
+	return nil
+}
+func (m MultipleOf) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply multipleof to an integer")
+	}
+	val := float64(m)
+	schema.MultipleOf = &val
+	return nil
+}
+
+func (m MaxLength) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "string" {
+		return fmt.Errorf("must apply maxlength to a string")
+	}
+	val := int64(m)
+	schema.MaxLength = &val
+	return nil
+}
+func (m MinLength) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "string" {
+		return fmt.Errorf("must apply minlength to a string")
+	}
+	val := int64(m)
+	schema.MinLength = &val
+	return nil
+}
+func (m Pattern) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "string" {
+		return fmt.Errorf("must apply pattern to a string")
+	}
+	schema.Pattern = string(m)
+	return nil
+}
+
+func (m MaxItems) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "array" {
+		return fmt.Errorf("must apply maxitems to an array")
+	}
+	val := int64(m)
+	schema.MaxItems = &val
+	return nil
+}
+func (m MinItems) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "array" {
+		return fmt.Errorf("must apply minitems to an array")
+	}
+	val := int64(m)
+	schema.MinItems = &val
+	return nil
+}
+func (m UniqueItems) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	if schema.Type != "array" {
+		return fmt.Errorf("must apply uniqueitems to an array")
+	}
+	schema.UniqueItems = bool(m)
+	return nil
+}
+
+func (m Enum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error {
+	// TODO(directxman12): 
this is a bit hacky -- we should + // probably support AnyType better + using the schema structure + vals := make([]v1beta1.JSON, len(m)) + for i, val := range m { + // TODO(directxman12): check actual type with schema type? + // if we're expecting a string, marshal the string properly... + // NB(directxman12): we use json.Marshal to ensure we handle JSON escaping properly + valMarshalled, err := json.Marshal(val) + if err != nil { + return err + } + vals[i] = v1beta1.JSON{Raw: valMarshalled} + } + schema.Enum = vals + return nil +} +func (m Format) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + schema.Format = string(m) + return nil +} + +// NB(directxman12): we "typecheck" on target schema properties here, +// which means the "Type" marker *must* be applied first. +// TODO(directxman12): find a less hacky way to do this +// (we could preserve ordering of markers, but that feels bad in its own right). + +func (m Type) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + schema.Type = string(m) + return nil +} + +func (m Type) ApplyFirst() {} diff --git a/pkg/crd/parser.go b/pkg/crd/parser.go new file mode 100644 index 000000000..ee88cc2bb --- /dev/null +++ b/pkg/crd/parser.go @@ -0,0 +1,208 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + "fmt" + "go/ast" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// TypeIdent represents some type in a Package. +type TypeIdent struct { + Package *loader.Package + Name string +} + +func (t TypeIdent) String() string { + return fmt.Sprintf("%q.%s", t.Package.ID, t.Name) +} + +// PackageOverride overrides the loading of some package +// (potentially setting custom schemata, etc). It must +// call AddPackage if it wants to continue with the default +// loading behavior. +type PackageOverride func(p *Parser, pkg *loader.Package) + +// Parser knows how to parse out CRD information and generate +// OpenAPI schemata from some collection of types and markers. +// Most methods on Parser cache their results automatically, +// and thus may be called any number of times. +type Parser struct { + Collector *markers.Collector + + // Types contains the known TypeInfo for this parser. + Types map[TypeIdent]*markers.TypeInfo + // Schemata contains the known OpenAPI JSONSchemata for this parser. + Schemata map[TypeIdent]apiext.JSONSchemaProps + // GroupVersions contains the known group-versions of each package in this parser. + GroupVersions map[*loader.Package]schema.GroupVersion + // CustomResourceDefinitions contains the known CustomResourceDefinitions for types in this parser. + CustomResourceDefinitions map[schema.GroupKind]apiext.CustomResourceDefinition + + // PackageOverrides indicates that the loading of any package with + // the given path should be handled by the given overrider. 
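+	//
+	// A hedged usage sketch (the package path is hypothetical); overrides that
+	// want the default loading behavior afterwards should call AddPackage:
+	//
+	//  parser.PackageOverrides["example.com/api/v1"] = func(p *Parser, pkg *loader.Package) {
+	//  	p.Schemata[TypeIdent{Name: "Special", Package: pkg}] = apiext.JSONSchemaProps{Type: "string"}
+	//  	p.AddPackage(pkg)
+	//  }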
+	PackageOverrides map[string]PackageOverride
+
+	// checker stores persistent partial type-checking/reference-traversal information.
+	Checker *loader.TypeChecker
+	// packages marks packages as loaded, to avoid re-loading them.
+	packages map[*loader.Package]struct{}
+
+	flattener *Flattener
+}
+
+func (p *Parser) init() {
+	if p.packages == nil {
+		p.packages = make(map[*loader.Package]struct{})
+	}
+	if p.flattener == nil {
+		p.flattener = &Flattener{
+			Parser: p,
+		}
+	}
+	if p.Schemata == nil {
+		p.Schemata = make(map[TypeIdent]apiext.JSONSchemaProps)
+	}
+	if p.Types == nil {
+		p.Types = make(map[TypeIdent]*markers.TypeInfo)
+	}
+	if p.PackageOverrides == nil {
+		p.PackageOverrides = make(map[string]PackageOverride)
+	}
+	if p.GroupVersions == nil {
+		p.GroupVersions = make(map[*loader.Package]schema.GroupVersion)
+	}
+	if p.CustomResourceDefinitions == nil {
+		p.CustomResourceDefinitions = make(map[schema.GroupKind]apiext.CustomResourceDefinition)
+	}
+}
+
+// indexTypes loads all types in the package into Types.
+func (p *Parser) indexTypes(pkg *loader.Package) {
+	// autodetect
+	pkgMarkers, err := markers.PackageMarkers(p.Collector, pkg)
+	if err != nil {
+		pkg.AddError(err)
+	} else if nameVal := pkgMarkers.Get("groupName"); nameVal != nil {
+		versionVal := pkg.Name // a reasonable guess
+		if versionMarker := pkgMarkers.Get("versionName"); versionMarker != nil {
+			versionVal = versionMarker.(string)
+		}
+
+		p.GroupVersions[pkg] = schema.GroupVersion{
+			Version: versionVal,
+			Group:   nameVal.(string),
+		}
+	}
+
+	if err := markers.EachType(p.Collector, pkg, func(info *markers.TypeInfo) {
+		ident := TypeIdent{
+			Package: pkg,
+			Name:    info.Name,
+		}
+
+		p.Types[ident] = info
+	}); err != nil {
+		pkg.AddError(err)
+	}
+}
+
+// LookupType fetches type info from Types.
+func (p *Parser) LookupType(pkg *loader.Package, name string) *markers.TypeInfo {
+	return p.Types[TypeIdent{Package: pkg, Name: name}]
+}
+
+// NeedSchemaFor indicates that a schema should be generated for the given type.
+func (p *Parser) NeedSchemaFor(typ TypeIdent) {
+	p.init()
+
+	p.NeedPackage(typ.Package)
+	if _, knownSchema := p.Schemata[typ]; knownSchema {
+		return
+	}
+
+	info, knownInfo := p.Types[typ]
+	if !knownInfo {
+		typ.Package.AddError(fmt.Errorf("unknown type %s", typ))
+		return
+	}
+
+	schemaCtx := newSchemaContext(typ.Package, p)
+	schema := infoToSchema(schemaCtx.ForInfo(info))
+
+	p.Schemata[typ] = *schema
+}
+
+// NeedCRDFor lives in spec.go
+
+// AddPackage indicates that types and type-checking information is needed
+// for the given package, *ignoring* overrides.
+// Generally, consumers should call NeedPackage, while PackageOverrides should
+// call AddPackage to continue with the normal loading procedure.
+func (p *Parser) AddPackage(pkg *loader.Package) {
+	p.init()
+	if _, checked := p.packages[pkg]; checked {
+		return
+	}
+	p.indexTypes(pkg)
+	p.Checker.Check(pkg, filterTypesForCRDs)
+	p.packages[pkg] = struct{}{}
+}
+
+// NeedPackage indicates that types and type-checking information
+// is needed for the given package.
+func (p *Parser) NeedPackage(pkg *loader.Package) {
+	p.init()
+	if _, checked := p.packages[pkg]; checked {
+		return
+	}
+	// overrides are going to be written without vendor. This is why we index
+	// by the actual object when we can.
+	if override, overridden := p.PackageOverrides[loader.NonVendorPath(pkg.PkgPath)]; overridden {
+		override(p, pkg)
+		p.packages[pkg] = struct{}{}
+		return
+	}
+	p.AddPackage(pkg)
+}
+
+// filterTypesForCRDs filters out all nodes that aren't used in CRD generation,
+// like interfaces and struct fields without JSON tag.
+func filterTypesForCRDs(node ast.Node) bool {
+	switch node := node.(type) {
+	case *ast.InterfaceType:
+		// skip interfaces, we never care about references in them
+		return false
+	case *ast.StructType:
+		return true
+	case *ast.Field:
+		_, hasTag := loader.ParseAstTag(node.Tag).Lookup("json")
+		// fields without JSON tags mean we have custom serialization,
+		// so only visit fields with tags.
+		return hasTag
+	default:
+		return true
+	}
+}
diff --git a/pkg/crd/schema.go b/pkg/crd/schema.go
new file mode 100644
index 000000000..6e545698f
--- /dev/null
+++ b/pkg/crd/schema.go
@@ -0,0 +1,393 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crd
+
+import (
+	"fmt"
+	"go/ast"
+	"go/types"
+	"strings"
+
+	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// Schema flattening is done in a recursive mapping method.
+// Start reading at infoToSchema.
+
+const (
+	// defPrefix is the prefix used to link to definitions in the OpenAPI schema.
+	defPrefix = "#/definitions/"
+)
+
+var (
+	// byteType is the types.Type for byte (see the types documentation
+	// for why we need to look this up in the Universe), saved
+	// for quick comparison.
+	byteType = types.Universe.Lookup("byte").Type()
+)
+
+// SchemaMarker is any marker that needs to modify the schema of the underlying type or field.
+type SchemaMarker interface {
+	// ApplyToSchema is called after the rest of the schema for a given type
+	// or field is generated, to modify the schema appropriately.
+	ApplyToSchema(*v1beta1.JSONSchemaProps) error
+}
+
+// applyFirstMarker is applied before any other markers. It's a bit of a hack.
+type applyFirstMarker interface {
+	ApplyFirst()
+}
+
+// schemaRequester knows how to mark that another schema (e.g. via an external reference) is necessary.
+type schemaRequester interface {
+	NeedSchemaFor(typ TypeIdent)
+}
+
+// schemaContext stores and provides information across a hierarchy of schema generation.
+type schemaContext struct {
+	pkg  *loader.Package
+	info *markers.TypeInfo
+
+	schemaRequester schemaRequester
+}
+
+// newSchemaContext constructs a new schemaContext for the given package and schema requester.
+// It must have type info added before use via ForInfo.
+func newSchemaContext(pkg *loader.Package, req schemaRequester) *schemaContext {
+	pkg.NeedTypesInfo()
+	return &schemaContext{
+		pkg:             pkg,
+		schemaRequester: req,
+	}
+}
+
+// ForInfo produces a new schemaContext containing the same information
+// as this one, except with the given type information.
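+// A typical use (compare structToSchema below) resets marker and doc info
+// when descending into a field's type, e.g.:
+//
+//  propSchema := typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), field.RawField.Type)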
+func (c *schemaContext) ForInfo(info *markers.TypeInfo) *schemaContext {
+	return &schemaContext{
+		pkg:             c.pkg,
+		info:            info,
+		schemaRequester: c.schemaRequester,
+	}
+}
+
+// requestSchema asks for the schema for a type in the package with the
+// given import path.
+func (c *schemaContext) requestSchema(pkgPath, typeName string) {
+	pkg := c.pkg
+	if pkgPath != "" {
+		pkg = c.pkg.Imports()[pkgPath]
+	}
+	c.schemaRequester.NeedSchemaFor(TypeIdent{
+		Package: pkg,
+		Name:    typeName,
+	})
+}
+
+// infoToSchema creates a schema for the type in the given set of type information.
+func infoToSchema(ctx *schemaContext) *v1beta1.JSONSchemaProps {
+	return typeToSchema(ctx, ctx.info.RawSpec.Type)
+}
+
+// applyMarkers applies schema markers to the given schema, respecting "apply first" markers.
+func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *v1beta1.JSONSchemaProps, node ast.Node) {
+	// apply "apply first" markers first...
+	for _, markerValues := range markerSet {
+		for _, markerValue := range markerValues {
+			if _, isApplyFirst := markerValue.(applyFirstMarker); !isApplyFirst {
+				continue
+			}
+
+			schemaMarker, isSchemaMarker := markerValue.(SchemaMarker)
+			if !isSchemaMarker {
+				continue
+			}
+
+			if err := schemaMarker.ApplyToSchema(props); err != nil {
+				ctx.pkg.AddError(loader.ErrFromNode(err /* an okay guess */, node))
+			}
+		}
+	}
+
+	// ...then the rest of the markers
+	for _, markerValues := range markerSet {
+		for _, markerValue := range markerValues {
+			if _, isApplyFirst := markerValue.(applyFirstMarker); isApplyFirst {
+				// skip apply-first markers, which were already applied
+				continue
+			}
+
+			schemaMarker, isSchemaMarker := markerValue.(SchemaMarker)
+			if !isSchemaMarker {
+				continue
+			}
+			if err := schemaMarker.ApplyToSchema(props); err != nil {
+				ctx.pkg.AddError(loader.ErrFromNode(err /* an okay guess */, node))
+			}
+		}
+	}
+}
+
+// typeToSchema creates a schema for the given AST type.
+func typeToSchema(ctx *schemaContext, rawType ast.Expr) *v1beta1.JSONSchemaProps {
+	var props *v1beta1.JSONSchemaProps
+	switch expr := rawType.(type) {
+	case *ast.Ident:
+		props = localNamedToSchema(ctx, expr)
+	case *ast.SelectorExpr:
+		props = namedToSchema(ctx, expr)
+	case *ast.ArrayType:
+		props = arrayToSchema(ctx, expr)
+	case *ast.MapType:
+		props = mapToSchema(ctx, expr)
+	case *ast.StarExpr:
+		props = typeToSchema(ctx, expr.X)
+	case *ast.StructType:
+		props = structToSchema(ctx, expr)
+	default:
+		ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unsupported AST kind %T", expr), rawType))
+		// NB(directxman12): we explicitly don't handle interfaces
+		return &v1beta1.JSONSchemaProps{}
+	}
+
+	props.Description = ctx.info.Doc
+
+	applyMarkers(ctx, ctx.info.Markers, props, rawType)
+
+	return props
+}
+
+// qualifiedName constructs a JSONSchema-safe qualified name for a type
+// (`<typeName>` or `<packageName>~0<typeName>`, where `<packageName>`
+// is the package path with `/` replaced by `~1`, according to JSONPointer
+// escapes).
+func qualifiedName(pkgName, typeName string) string {
+	if pkgName != "" {
+		return strings.Replace(pkgName, "/", "~1", -1) + "~0" + typeName
+	}
+	return typeName
+}
+
+// definitionLink creates a definition link for the given type and package.
+func definitionLink(pkgName, typeName string) string {
+	return defPrefix + qualifiedName(pkgName, typeName)
+}
+
+// localNamedToSchema creates a schema (ref) for a *potentially* local type reference
+// (could be external from a dot-import).
+func localNamedToSchema(ctx *schemaContext, ident *ast.Ident) *v1beta1.JSONSchemaProps {
+	typeInfo := ctx.pkg.TypesInfo.TypeOf(ident)
+	if typeInfo == types.Typ[types.Invalid] {
+		ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", ident.Name), ident))
+		return &v1beta1.JSONSchemaProps{}
+	}
+	if basicInfo, isBasic := typeInfo.(*types.Basic); isBasic {
+		typ, format, err := builtinToType(basicInfo)
+		if err != nil {
+			ctx.pkg.AddError(loader.ErrFromNode(err, ident))
+		}
+		return &v1beta1.JSONSchemaProps{
+			Type:   typ,
+			Format: format,
+		}
+	}
+	// NB(directxman12): if there are dot imports, this might be an external reference,
+	// so use typechecking info to get the actual object
+	typeNameInfo := typeInfo.(*types.Named).Obj()
+	pkg := typeNameInfo.Pkg()
+	pkgPath := loader.NonVendorPath(pkg.Path())
+	if pkg == ctx.pkg.Types {
+		pkgPath = ""
+	}
+	ctx.requestSchema(pkgPath, typeNameInfo.Name())
+	link := definitionLink(pkgPath, typeNameInfo.Name())
+	return &v1beta1.JSONSchemaProps{
+		Ref: &link,
+	}
+}
+
+// namedToSchema creates a schema (ref) for an explicitly external type reference.
+func namedToSchema(ctx *schemaContext, named *ast.SelectorExpr) *v1beta1.JSONSchemaProps {
+	typeInfoRaw := ctx.pkg.TypesInfo.TypeOf(named)
+	if typeInfoRaw == types.Typ[types.Invalid] {
+		ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %v.%s", named.X, named.Sel.Name), named))
+		return &v1beta1.JSONSchemaProps{}
+	}
+	typeInfo := typeInfoRaw.(*types.Named)
+	typeNameInfo := typeInfo.Obj()
+	nonVendorPath := loader.NonVendorPath(typeNameInfo.Pkg().Path())
+	ctx.requestSchema(nonVendorPath, typeNameInfo.Name())
+	link := definitionLink(nonVendorPath, typeNameInfo.Name())
+	return &v1beta1.JSONSchemaProps{
+		Ref: &link,
+	}
+	// NB(directxman12): we special-case things like resource.Quantity during the "collapse" phase.
+}
+
+// arrayToSchema creates a schema for the items of the given array, dealing appropriately
+// with the special `[]byte` type (according to OpenAPI standards).
+func arrayToSchema(ctx *schemaContext, array *ast.ArrayType) *v1beta1.JSONSchemaProps {
+	eltType := ctx.pkg.TypesInfo.TypeOf(array.Elt)
+	if eltType == byteType && array.Len == nil {
+		// byte slices are represented as base64-encoded strings
+		// (the format is defined in OpenAPI v3, but not JSON Schema)
+		return &v1beta1.JSONSchemaProps{
+			Type:   "string",
+			Format: "byte",
+		}
+	}
+	// TODO(directxman12): backwards-compat would require access to markers from base info
+	items := typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), array.Elt)
+
+	return &v1beta1.JSONSchemaProps{
+		Type:  "array",
+		Items: &v1beta1.JSONSchemaPropsOrArray{Schema: items},
+	}
+}
+
+// mapToSchema creates a schema for items of the given map. Key types must eventually resolve
+// to string (other types aren't allowed by JSON, and thus by the kubernetes API standards).
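+// For example (an illustrative sketch): map[string]int and
+// map[MyStringAlias]SomeStruct are accepted, while map[int]string is
+// rejected with an error on the key expression.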
+func mapToSchema(ctx *schemaContext, mapType *ast.MapType) *v1beta1.JSONSchemaProps {
+	keyInfo := ctx.pkg.TypesInfo.TypeOf(mapType.Key)
+	// check that we've got a type that actually corresponds to a string
+	for keyInfo != nil {
+		switch typedKey := keyInfo.(type) {
+		case *types.Basic:
+			if typedKey.Info()&types.IsString == 0 {
+				ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map keys must be strings, not %s", keyInfo.String()), mapType.Key))
+				return &v1beta1.JSONSchemaProps{}
+			}
+			keyInfo = nil // stop iterating
+		case *types.Named:
+			keyInfo = typedKey.Underlying()
+		default:
+			ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map keys must be strings, not %s", keyInfo.String()), mapType.Key))
+			return &v1beta1.JSONSchemaProps{}
+		}
+	}
+
+	// TODO(directxman12): backwards-compat would require access to markers from base info
+	var valSchema *v1beta1.JSONSchemaProps
+	switch val := mapType.Value.(type) {
+	case *ast.Ident:
+		valSchema = localNamedToSchema(ctx.ForInfo(&markers.TypeInfo{}), val)
+	case *ast.SelectorExpr:
+		valSchema = namedToSchema(ctx.ForInfo(&markers.TypeInfo{}), val)
+	default:
+		ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map values must be a named type, not %T", mapType.Value), mapType.Value))
+		return &v1beta1.JSONSchemaProps{}
+	}
+
+	return &v1beta1.JSONSchemaProps{
+		Type: "object",
+		AdditionalProperties: &v1beta1.JSONSchemaPropsOrBool{
+			Schema: valSchema,
+		},
+	}
+}
+
+// structToSchema creates a schema for the given struct. Embedded fields are placed in AllOf,
+// and can be flattened later with a Flattener.
+func structToSchema(ctx *schemaContext, structType *ast.StructType) *v1beta1.JSONSchemaProps {
+	props := &v1beta1.JSONSchemaProps{
+		Type:       "object",
+		Properties: make(map[string]v1beta1.JSONSchemaProps),
+	}
+
+	if ctx.info.RawSpec.Type != structType {
+		ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("encountered non-top-level struct (possibly embedded), those aren't allowed"), structType))
+		return props
+	}
+
+	for _, field := range ctx.info.Fields {
+		jsonTag, hasTag := field.Tag.Lookup("json")
+		if !hasTag {
+			// if the field doesn't have a JSON tag, it doesn't belong in output (and shouldn't exist in a serialized type)
+			ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("encountered struct field %q without JSON tag in type %q", field.Name, ctx.info.Name), field.RawField))
+			continue
+		}
+		jsonOpts := strings.Split(jsonTag, ",")
+		if len(jsonOpts) == 1 && jsonOpts[0] == "-" {
+			// skipped fields have the tag "-" (note that "-," means the field is named "-")
+			continue
+		}
+
+		inline := false
+		omitEmpty := false
+		for _, opt := range jsonOpts[1:] {
+			switch opt {
+			case "inline":
+				inline = true
+			case "omitempty":
+				omitEmpty = true
+			}
+		}
+		fieldName := jsonOpts[0]
+		inline = inline || fieldName == "" // anonymous fields are inline fields in YAML/JSON
+
+		if !inline && !omitEmpty {
+			props.Required = append(props.Required, fieldName)
+		}
+
+		propSchema := typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), field.RawField.Type)
+		propSchema.Description = field.Doc
+
+		applyMarkers(ctx, field.Markers, propSchema, field.RawField)
+
+		if inline {
+			props.AllOf = append(props.AllOf, *propSchema)
+			continue
+		}
+
+		props.Properties[fieldName] = *propSchema
+	}
+
+	return props
+}
+
+// builtinToType converts builtin basic types to their equivalent JSON schema form.
+// It *only* handles types allowed by the kubernetes API standards, meaning
+// no floats (technically we shouldn't allow int64 or plain int either, but :shrug:).
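+// For example: bool maps to ("boolean", ""), string to ("string", ""),
+// int32 to ("integer", "int32"), and float64 produces an error.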
+func builtinToType(basic *types.Basic) (typ string, format string, err error) {
+	// NB(directxman12): formats from OpenAPI v3 are slightly different than those defined
+	// in JSONSchema. This'll use the OpenAPI v3 ones, since they're useful for bounding our
+	// non-string types.
+	basicInfo := basic.Info()
+	switch {
+	case basicInfo&types.IsBoolean != 0:
+		typ = "boolean"
+	case basicInfo&types.IsString != 0:
+		typ = "string"
+	case basicInfo&types.IsInteger != 0:
+		typ = "integer"
+	default:
+		// NB(directxman12): floats are *NOT* allowed in kubernetes APIs
+		return "", "", fmt.Errorf("unsupported type %q", basic.String())
+	}
+
+	switch basic.Kind() {
+	case types.Int32, types.Uint32:
+		format = "int32"
+	case types.Int64, types.Uint64:
+		format = "int64"
+	}
+
+	return typ, format, nil
+}
diff --git a/pkg/crd/schema_visitor.go b/pkg/crd/schema_visitor.go
new file mode 100644
index 000000000..ab4b1889c
--- /dev/null
+++ b/pkg/crd/schema_visitor.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crd
+
+import (
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+)
+
+// SchemaVisitor walks the nodes of a schema.
+type SchemaVisitor interface {
+	// Visit is called for each schema node. If it returns a visitor,
+	// the visitor will be called on each direct child node, and then
+	// this visitor will be called again with `nil` to indicate that
+	// all children have been visited. If a nil visitor is returned,
+	// children are not visited.
+	Visit(schema *apiext.JSONSchemaProps) SchemaVisitor
+}
+
+// EditSchema walks the given schema using the given visitor. Actual
+// pointers to each schema node are passed to the visitor, so any changes
+// made by the visitor will be reflected in the passed-in schema.
+func EditSchema(schema *apiext.JSONSchemaProps, visitor SchemaVisitor) {
+	walker := schemaWalker{visitor: visitor}
+	walker.walkSchema(schema)
+}
+
+// schemaWalker knows how to walk the schema, saving modifications
+// made by the given visitor.
+type schemaWalker struct {
+	visitor SchemaVisitor
+}
+
+// walkSchema walks the given schema, saving modifications made by the
+// visitor (this is as simple as passing a pointer in most cases,
+// but special care needs to be taken to persist changes made to map values).
+func (w schemaWalker) walkSchema(schema *apiext.JSONSchemaProps) { + subVisitor := w.visitor.Visit(schema) + if subVisitor == nil { + return + } + defer subVisitor.Visit(nil) + + subWalker := schemaWalker{visitor: subVisitor} + if schema.Items != nil { + subWalker.walkPtr(schema.Items.Schema) + subWalker.walkSlice(schema.Items.JSONSchemas) + } + subWalker.walkSlice(schema.AllOf) + subWalker.walkSlice(schema.OneOf) + subWalker.walkSlice(schema.AnyOf) + subWalker.walkPtr(schema.Not) + subWalker.walkMap(schema.Properties) + if schema.AdditionalProperties != nil { + subWalker.walkPtr(schema.AdditionalProperties.Schema) + } + subWalker.walkMap(schema.PatternProperties) + for name, dep := range schema.Dependencies { + subWalker.walkPtr(dep.Schema) + schema.Dependencies[name] = dep + } + if schema.AdditionalItems != nil { + subWalker.walkPtr(schema.AdditionalItems.Schema) + } + subWalker.walkMap(schema.Definitions) +} + +// walkMap walks over values of the given map, saving changes to them. +func (w schemaWalker) walkMap(defs map[string]apiext.JSONSchemaProps) { + for name, def := range defs { + w.walkSchema(&def) + // make sure the edits actually go through since we can't + // take a reference to the value in the map + defs[name] = def + } +} + +// walkSlice walks over items of the given slice. +func (w schemaWalker) walkSlice(defs []apiext.JSONSchemaProps) { + for i := range defs { + w.walkSchema(&defs[i]) + } +} + +// walkPtr walks over the contents of the given pointer, if it's not nil. +func (w schemaWalker) walkPtr(def *apiext.JSONSchemaProps) { + if def == nil { + return + } + w.walkSchema(def) +} diff --git a/pkg/crd/spec.go b/pkg/crd/spec.go new file mode 100644 index 000000000..a241010b0 --- /dev/null +++ b/pkg/crd/spec.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package crd + +import ( + "fmt" + "strings" + + "github.com/markbates/inflect" + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// SpecMarker is a marker that knows how to apply itself to a particular +// version in a CRD. +type SpecMarker interface { + // ApplyToCRD applies this marker to the given CRD, in the given version + // within that CRD. It's called after everything else in the CRD is populated. + ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error +} + +// NeedCRDFor requests the full CRD for the given group-kind. It requires +// that the packages containing the Go structs for that CRD have already +// been loaded with NeedPackage. 
+func (p *Parser) NeedCRDFor(groupKind schema.GroupKind) {
+	p.init()
+
+	if _, exists := p.CustomResourceDefinitions[groupKind]; exists {
+		return
+	}
+
+	var packages []*loader.Package
+	for pkg, gv := range p.GroupVersions {
+		if gv.Group != groupKind.Group {
+			continue
+		}
+		packages = append(packages, pkg)
+	}
+
+	defaultPlural := inflect.NewDefaultRuleset().Pluralize(strings.ToLower(groupKind.Kind))
+	crd := apiext.CustomResourceDefinition{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: apiext.SchemeGroupVersion.String(),
+			Kind:       "CustomResourceDefinition",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: defaultPlural + "." + groupKind.Group,
+		},
+		Spec: apiext.CustomResourceDefinitionSpec{
+			Group: groupKind.Group,
+			Names: apiext.CustomResourceDefinitionNames{
+				Kind:   groupKind.Kind,
+				Plural: defaultPlural,
+			},
+		},
+	}
+
+	for _, pkg := range packages {
+		typeIdent := TypeIdent{Package: pkg, Name: groupKind.Kind}
+		typeInfo := p.Types[typeIdent]
+		if typeInfo == nil {
+			continue
+		}
+		fullSchema := FlattenEmbedded(p.flattener.FlattenType(typeIdent))
+		ver := apiext.CustomResourceDefinitionVersion{
+			Name:   p.GroupVersions[pkg].Version,
+			Served: true,
+			Schema: &apiext.CustomResourceValidation{
+				OpenAPIV3Schema: fullSchema,
+			},
+		}
+		crd.Spec.Versions = append(crd.Spec.Versions, ver)
+	}
+
+	// markers are applied *after* initial generation of objects
+	for _, pkg := range packages {
+		typeIdent := TypeIdent{Package: pkg, Name: groupKind.Kind}
+		typeInfo := p.Types[typeIdent]
+		if typeInfo == nil {
+			continue
+		}
+		ver := p.GroupVersions[pkg].Version
+
+		for _, markerVals := range typeInfo.Markers {
+			for _, val := range markerVals {
+				crdMarker, isCrdMarker := val.(SpecMarker)
+				if !isCrdMarker {
+					continue
+				}
+				if err := crdMarker.ApplyToCRD(&crd.Spec, ver); err != nil {
+					pkg.AddError(loader.ErrFromNode(err /* an okay guess */, typeInfo.RawSpec))
+				}
+			}
+		}
+	}
+
+	// fix the name if the plural was changed (this is the form the name *has* to take, so no harm in changing it).
+	crd.Name = crd.Spec.Names.Plural + "." + groupKind.Group
+
+	// nothing to actually write
+	if len(crd.Spec.Versions) == 0 {
+		return
+	}
+
+	// make sure we have *a* storage version
+	// (default it if we only have one, otherwise, bail)
+	if len(crd.Spec.Versions) == 1 {
+		crd.Spec.Versions[0].Storage = true
+	}
+
+	hasStorage := false
+	for _, ver := range crd.Spec.Versions {
+		if ver.Storage {
+			hasStorage = true
+			break
+		}
+	}
+	if !hasStorage {
+		// just add the error to the first relevant package for this CRD,
+		// since there's no specific error location
+		packages[0].AddError(fmt.Errorf("CRD for %s has no storage version", groupKind))
+	}
+
+	p.CustomResourceDefinitions[groupKind] = crd
+}

From aec277d514d28ac2fd68d80a2ddc237c3981943f Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Fri, 26 Apr 2019 23:00:12 -0700
Subject: [PATCH 08/14] Deepcopy generation

This implements deepcopy generation, mostly copying from deepcopy-gen
but using the generator framework. It scopes things down slightly,
implementing only the parts we care about for custom types that
represent CRDs.
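As a rough sketch of the intended input and output (the Simple type here
is hypothetical; the marker name and generated method shapes come from
the generator code below), a type marked like

    // +kubebuilder:object:generate=true
    type Simple struct {
        Items []string `json:"items,omitempty"`
    }

should get, in zz_generated.deepcopy.go, roughly:

    // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
    func (in *Simple) DeepCopyInto(out *Simple) {
        *out = *in
        if in.Items != nil {
            in, out := &in.Items, &out.Items
            *out = make([]string, len(*in))
            copy(*out, *in)
        }
    }

    // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Simple.
    func (in *Simple) DeepCopy() *Simple {
        if in == nil {
            return nil
        }
        out := new(Simple)
        in.DeepCopyInto(out)
        return out
    }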
---
 pkg/deepcopy/doc.go      |  23 ++
 pkg/deepcopy/gen.go      | 265 +++++++++++++++
 pkg/deepcopy/traverse.go | 709 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 997 insertions(+)
 create mode 100644 pkg/deepcopy/doc.go
 create mode 100644 pkg/deepcopy/gen.go
 create mode 100644 pkg/deepcopy/traverse.go

diff --git a/pkg/deepcopy/doc.go b/pkg/deepcopy/doc.go
new file mode 100644
index 000000000..f4200f2fc
--- /dev/null
+++ b/pkg/deepcopy/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package deepcopy generates DeepCopy, DeepCopyInto, and DeepCopyObject
+// implementations for types.
+//
+// It's ported from k8s.io/code-generator's / k8s.io/gengo's deepcopy-gen,
+// but it's scoped specifically to runtime.Object and skips support for
+// deepcopying interfaces, which aren't handled in CRDs anyway.
+package deepcopy
diff --git a/pkg/deepcopy/gen.go b/pkg/deepcopy/gen.go
new file mode 100644
index 000000000..4bf018805
--- /dev/null
+++ b/pkg/deepcopy/gen.go
@@ -0,0 +1,265 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deepcopy
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/format"
+	"io"
+	"sort"
+	"strings"
+
+	"sigs.k8s.io/controller-tools/pkg/genall"
+	"sigs.k8s.io/controller-tools/pkg/loader"
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// NB(directxman12): markers.LoadRoots ignores autogenerated code via a build tag
+// so any time we check for existing deepcopy functions, we only see manually written ones.
+
+const (
+	objName    = "kubebuilder:object:root"
+	enableName = "kubebuilder:object:generate"
+
+	legacyEnableName = "k8s:deepcopy-gen"
+	legacyObjName    = "k8s:deepcopy-gen:interfaces"
+	runtimeObjPath   = "k8s.io/apimachinery/pkg/runtime.Object"
+)
+
+// Generator generates code containing DeepCopy, DeepCopyInto, and
+// DeepCopyObject method implementations.
+type Generator struct {
+	HeaderFile string `marker:",optional"`
+	Year       string `marker:",optional"`
+}
+
+func (Generator) RegisterMarkers(into *markers.Registry) error {
+	if err := into.Define(enableName, markers.DescribesPackage, false); err != nil {
+		return err
+	}
+	if err := into.Define(legacyEnableName, markers.DescribesPackage, markers.RawArguments(nil)); err != nil {
+		return err
+	}
+	if err := into.Define(enableName, markers.DescribesType, false); err != nil {
+		return err
+	}
+	if err := into.Define(legacyEnableName, markers.DescribesType, markers.RawArguments(nil)); err != nil {
+		return err
+	}
+	if err := into.Define(objName, markers.DescribesType, false); err != nil {
+		return err
+	}
+	if err := into.Define(legacyObjName, markers.DescribesType, ""); err != nil {
+		return err
+	}
+	return nil
+}
+
+func enabledOnPackage(col *markers.Collector, pkg *loader.Package) (bool, error) {
+	pkgMarkers, err := markers.PackageMarkers(col, pkg)
+	if err != nil {
+		return false, err
+	}
+	pkgMarker := pkgMarkers.Get(enableName)
+	if pkgMarker != nil {
+		return pkgMarker.(bool), nil
+	}
+	legacyMarker := pkgMarkers.Get(legacyEnableName)
+	if legacyMarker != nil {
+		legacyMarkerVal := string(legacyMarker.(markers.RawArguments))
+		firstArg := strings.Split(legacyMarkerVal, ",")[0]
+		return firstArg == "package", nil
+	}
+
+	return false, nil
+}
+
+func enabledOnType(allTypes bool, info *markers.TypeInfo) bool {
+	if typeMarker := info.Markers.Get(enableName); typeMarker != nil {
+		return typeMarker.(bool)
+	}
+	legacyMarker := info.Markers.Get(legacyEnableName)
+	if legacyMarker != nil {
+		legacyMarkerVal := string(legacyMarker.(markers.RawArguments))
+		return legacyMarkerVal == "true"
+	}
+	return allTypes || genObjectInterface(info)
+}
+
+func genObjectInterface(info *markers.TypeInfo) bool {
+	objectEnabled := info.Markers.Get(objName)
+	if objectEnabled != nil {
+		return objectEnabled.(bool)
+	}
+
+	for _, legacyEnabled := range info.Markers[legacyObjName] {
+		if legacyEnabled == runtimeObjPath {
+			return true
+		}
+	}
+	return false
+}
+
+func (d Generator) Generate(ctx *genall.GenerationContext) error {
+	var headerText string
+
+	checker := ctx.Checker
+
+	if d.HeaderFile != "" {
+		headerBytes, err := ctx.ReadFile(d.HeaderFile)
+		if err != nil {
+			return err
+		}
+		headerText = string(headerBytes)
+	}
+	headerText = strings.ReplaceAll(headerText, " YEAR", d.Year)
+
+	for _, root := range ctx.Roots {
+		allTypes, err := enabledOnPackage(ctx.Collector, root)
+		if err != nil {
+			root.AddError(err)
+			continue
+		}
+
+		checker.Check(root, func(node ast.Node) bool {
+			// ignore interfaces
+			_, isIface := node.(*ast.InterfaceType)
+			return !isIface
+		})
+
+		root.NeedTypesInfo()
+
+		byType := make(map[string][]byte)
+		imports := &importsList{
+			byPath:  make(map[string]string),
+			byAlias: make(map[string]string),
+			pkg:     root,
+		}
+		// avoid confusing aliases by "reserving" the root package's name as an alias
+		imports.byAlias[root.Name] = ""
+
+		if err := markers.EachType(ctx.Collector, root, func(info *markers.TypeInfo) {
+			outContent := new(bytes.Buffer)
+
+			// copy when enabled for all types and not disabled, or enabled
+			// specifically on this type
+			if !enabledOnType(allTypes, info) {
+				return
+			}
+
+			// avoid copying non-exported types, etc
+			if !shouldBeCopied(root, info) {
+				return
+			}
+
+			copyCtx := &copyMethodMaker{
+				pkg:         root,
+				importsList: imports,
+				codeWriter:  &codeWriter{out: outContent},
+			}
+
+			copyCtx.GenerateMethodsFor(root, info)
+
+			outBytes := outContent.Bytes()
+			if 
len(outBytes) > 0 { + byType[info.Name] = outBytes + } + }); err != nil { + root.AddError(err) + continue + } + + if len(byType) == 0 { + continue + } + + outContent := new(bytes.Buffer) + writeHeader(root, outContent, root.Name, imports, headerText) + writeMethods(root, outContent, byType) + + writeFormatted(ctx, root, outContent.Bytes()) + } + + return nil +} + +// writeHeader writes out the build tag, package declaration, and imports +func writeHeader(pkg *loader.Package, out io.Writer, packageName string, imports *importsList, headerText string) { + // NB(directxman12): blank line after build tags to distinguish them from comments + _, err := fmt.Fprintf(out, `// +build !ignore_autogenerated + +%[3]s + +// autogenerated by controller-gen object, do not modify manually + +package %[1]s + +import ( +%[2]s +) + +`, packageName, strings.Join(imports.ImportSpecs(), "\n"), headerText) + if err != nil { + pkg.AddError(err) + } + +} + +// writeMethods writes each method to the file, sorted by type name. +func writeMethods(pkg *loader.Package, out io.Writer, byType map[string][]byte) { + sortedNames := make([]string, 0, len(byType)) + for name := range byType { + sortedNames = append(sortedNames, name) + } + sort.Strings(sortedNames) + + for _, name := range sortedNames { + _, err := out.Write(byType[name]) + if err != nil { + pkg.AddError(err) + } + } +} + +// writeFormatted outputs the given code, after gofmt-ing it. If we couldn't gofmt, +// we write the unformatted code for debugging purposes. +func writeFormatted(ctx *genall.GenerationContext, root *loader.Package, outBytes []byte) { + formattedBytes, err := format.Source(outBytes) + if err != nil { + root.AddError(err) + // we still write the invalid source to disk to figure out what went wrong + } else { + outBytes = formattedBytes + } + + outputFile, err := ctx.Open(root, "zz_generated.deepcopy.go") + if err != nil { + root.AddError(err) + return + } + defer outputFile.Close() + n, err := outputFile.Write(outBytes) + if err != nil { + root.AddError(err) + return + } + if n < len(outBytes) { + root.AddError(io.ErrShortWrite) + } +} diff --git a/pkg/deepcopy/traverse.go b/pkg/deepcopy/traverse.go new file mode 100644 index 000000000..4f4fca1ab --- /dev/null +++ b/pkg/deepcopy/traverse.go @@ -0,0 +1,709 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deepcopy + +import ( + "fmt" + "go/ast" + "go/types" + "io" + "path" + "strings" + "unicode" + "unicode/utf8" + + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// NB(directxman12): This code is a bit of a byzantine mess. +// I've tried to clean it up a bit from the original in deepcopy-gen, +// but parts remain a bit convoluted. Exercise caution when changing. +// It's perhaps a tad over-commented now, but better safe than sorry. + +// codeWriter assists in writing out Go code lines and blocks to a writer. +type codeWriter struct { + out io.Writer +} + +// Line writes a single line. 
+func (c *codeWriter) Line(line string) {
+	fmt.Fprintln(c.out, line)
+}
+
+// Linef writes a single line with formatting (as per fmt.Sprintf).
+func (c *codeWriter) Linef(line string, args ...interface{}) {
+	fmt.Fprintf(c.out, line+"\n", args...)
+}
+
+// If writes an if statement with the given setup/condition clause, executing
+// the given function to write the contents of the block.
+func (c *codeWriter) If(setup string, block func()) {
+	c.Linef("if %s {", setup)
+	block()
+	c.Line("}")
+}
+
+// IfElse writes if and else statements with the given setup/condition clause, executing
+// the given functions to write the contents of the blocks.
+func (c *codeWriter) IfElse(setup string, ifBlock func(), elseBlock func()) {
+	c.Linef("if %s {", setup)
+	ifBlock()
+	c.Line("} else {")
+	elseBlock()
+	c.Line("}")
+}
+
+// For writes a for statement with the given setup/condition clause, executing
+// the given function to write the contents of the block.
+func (c *codeWriter) For(setup string, block func()) {
+	c.Linef("for %s {", setup)
+	block()
+	c.Line("}")
+}
+
+// importsList keeps track of required imports, automatically assigning aliases
+// to import statements.
+type importsList struct {
+	byPath  map[string]string
+	byAlias map[string]string
+
+	pkg *loader.Package
+}
+
+// NeedImport marks that the given package is needed in the list of imports,
+// returning the ident (import alias) that should be used to reference the package.
+func (l *importsList) NeedImport(importPath string) string {
+	// we get an actual path from Package, which might include vendored
+	// packages if running on a package in vendor.
+	if ind := strings.LastIndex(importPath, "/vendor/"); ind != -1 {
+		importPath = importPath[ind+8: /* len("/vendor/") */]
+	}
+
+	// check to see if we've already assigned an alias, and just return that.
+	alias, exists := l.byPath[importPath]
+	if exists {
+		return alias
+	}
+
+	// otherwise, calculate an import alias by joining path parts till we get something unique
+	restPath, nextWord := path.Split(importPath)
+
+	for otherPath, exists := "", true; exists && otherPath != importPath; otherPath, exists = l.byAlias[alias] {
+		if restPath == "" {
+			// do something else to disambiguate if we've run out of parts and
+			// still have duplicates, somehow
+			alias += "x"
+		}
+
+		// can't have a first digit, per Go identifier rules, so just skip them
+		for firstRune, runeLen := utf8.DecodeRuneInString(nextWord); unicode.IsDigit(firstRune); firstRune, runeLen = utf8.DecodeRuneInString(nextWord) {
+			nextWord = nextWord[runeLen:]
+		}
+
+		// make a valid identifier by replacing "bad" characters with underscores
+		nextWord = strings.Map(func(r rune) rune {
+			if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
+				return r
+			}
+			return '_'
+		}, nextWord)
+
+		alias = nextWord + alias
+		if len(restPath) > 0 {
+			restPath, nextWord = path.Split(restPath[:len(restPath)-1] /* chop off final slash */)
+		}
+	}
+
+	l.byPath[importPath] = alias
+	l.byAlias[alias] = importPath
+	return alias
+}
+
+// ImportSpecs returns a string form of each import spec
+// (i.e. `alias "path/to/import"`). Aliases are only present
+// when they don't match the package name.
+func (l *importsList) ImportSpecs() []string {
+	res := make([]string, 0, len(l.byPath))
+	for importPath, alias := range l.byPath {
+		pkg := l.pkg.Imports()[importPath]
+		if pkg != nil && pkg.Name == alias {
+			// don't print if alias is the same as package name
+			// (we've already taken care of duplicates).
+			res = append(res, fmt.Sprintf("%q", importPath))
+		} else {
+			res = append(res, fmt.Sprintf("%s %q", alias, importPath))
+		}
+	}
+	return res
+}
+
+// copyMethodMaker makes DeepCopy (and related) methods for Go types,
+// writing them to its codeWriter.
+type copyMethodMaker struct {
+	pkg *loader.Package
+	*importsList
+	*codeWriter
+}
+
+// GenerateMethodsFor makes DeepCopy, DeepCopyInto, and DeepCopyObject methods
+// for the given type, when appropriate
+func (c *copyMethodMaker) GenerateMethodsFor(root *loader.Package, info *markers.TypeInfo) {
+	typeInfo := root.TypesInfo.TypeOf(info.RawSpec.Type)
+	if typeInfo == types.Typ[types.Invalid] {
+		root.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", info.Name), info.RawSpec))
+	}
+
+	// figure out if we need to use a pointer receiver -- most types get a pointer receiver,
+	// except those that are aliases to types that are already pass-by-reference (pointers,
+	// interfaces, maps, slices).
+	ptrReceiver := usePtrReceiver(typeInfo)
+
+	hasManualDeepCopyInto := hasDeepCopyIntoMethod(root, typeInfo)
+	hasManualDeepCopy, deepCopyOnPtr := hasDeepCopyMethod(root, typeInfo)
+
+	// only generate each method if it hasn't been implemented.
+	if !hasManualDeepCopyInto {
+		c.Line("// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.")
+		if ptrReceiver {
+			c.Linef("func (in *%s) DeepCopyInto(out *%s) {", info.Name, info.Name)
+		} else {
+			c.Linef("func (in %s) DeepCopyInto(out *%s) {", info.Name, info.Name)
+			c.Line("in := &in")
+		}
+
+		// just wrap the existing deepcopy if present
+		if hasManualDeepCopy {
+			if deepCopyOnPtr {
+				c.Line("clone := in.DeepCopy()")
+				c.Line("*out = *clone")
+			} else {
+				c.Line("*out = in.DeepCopy()")
+			}
+		} else {
+			c.genDeepCopyIntoBlock(info.Name, typeInfo)
+		}
+
+		c.Line("}")
+	}
+
+	if !hasManualDeepCopy {
+		// these are both straightforward, so we just template them out.
+		if ptrReceiver {
+			c.Linef(ptrDeepCopy, info.Name)
+		} else {
+			c.Linef(bareDeepCopy, info.Name)
+		}
+
+		// maybe also generate DeepCopyObject, if asked.
+		if genObjectInterface(info) {
+			// we always need runtime.Object for DeepCopyObject
+			runtimeAlias := c.NeedImport("k8s.io/apimachinery/pkg/runtime")
+			if ptrReceiver {
+				c.Linef(ptrDeepCopyObj, info.Name, runtimeAlias)
+			} else {
+				c.Linef(bareDeepCopyObj, info.Name, runtimeAlias)
+			}
+		}
+	}
+}
+
+// genDeepCopyIntoBlock generates a DeepCopyInto block for the given type. The
+// block is *not* wrapped in curly braces.
+func (c *copyMethodMaker) genDeepCopyIntoBlock(actualName string, typeInfo types.Type) {
+	// we might hit a type that has a manual deepcopy method written on non-root types
+	// (this case is handled for root types in GenerateMethodsFor)
+	if hasAnyDeepCopyMethod(c.pkg, typeInfo) {
+		c.Line("*out = in.DeepCopy()")
+		return
+	}
+
+	// we calculate *how* we should copy mostly based on the "eventual" type of
+	// a given type (i.e. the type that results from following all aliases)
+	last := eventualUnderlyingType(typeInfo)
+
+	switch last := last.(type) {
+	case *types.Basic:
+		// basic types themselves can be "shallow" copied, so all we need
+		// to do is check if our *actual* type (not the underlying one) has
+		// a custom method implemented.
+		if hasMethod, _ := hasDeepCopyMethod(c.pkg, typeInfo); hasMethod {
+			c.Line("*out = in.DeepCopy()")
+		}
+		c.Line("*out = *in")
+	case *types.Map:
+		c.genMapDeepCopy(actualName, last)
+	case *types.Slice:
+		c.genSliceDeepCopy(actualName, last)
+	case *types.Struct:
+		c.genStructDeepCopy(actualName, last)
+	case *types.Pointer:
+		c.genPointerDeepCopy(actualName, last)
+	case *types.Named:
+		// handled via the above loop, should never happen
+		c.pkg.AddError(fmt.Errorf("named type %s encountered directly, invalid condition", last))
+	default:
+		c.pkg.AddError(fmt.Errorf("invalid type %s", last))
+	}
+}
+
+// genMapDeepCopy generates DeepCopy code for the given named type whose eventual
+// type is the given map type.
+func (c *copyMethodMaker) genMapDeepCopy(actualName string, mapType *types.Map) {
+	// maps *must* have shallow-copiable types, since we just iterate
+	// through the keys, only trying to deepcopy the values.
+	if !fineToShallowCopy(mapType.Key()) {
+		c.pkg.AddError(fmt.Errorf("invalid map key type %s", mapType.Key()))
+		return
+	}
+
+	// make our actual type (not the underlying one)...
+	c.Linef("*out = make(%[1]s, len(*in))", actualName)
+
+	// ...and copy each element appropriately
+	c.For("key, val := range *in", func() {
+		// check if we have manually written methods,
+		// in which case we'll just try and use those
+		hasDeepCopy, copyOnPtr := hasDeepCopyMethod(c.pkg, mapType.Elem())
+		hasDeepCopyInto := hasDeepCopyIntoMethod(c.pkg, mapType.Elem())
+		switch {
+		case hasDeepCopyInto || hasDeepCopy:
+			// use the manually-written methods
+			_, outNeedsPtr := mapType.Elem().(*types.Pointer) // is "out" actually a pointer
+			inIsPtr := usePtrReceiver(mapType.Elem())         // does copying "in" produce a pointer
+			if hasDeepCopy {
+				// If we're calling DeepCopy, check if its receiver needs a pointer
+				inIsPtr = copyOnPtr
+			}
+			if inIsPtr == outNeedsPtr {
+				c.Line("(*out)[key] = val.DeepCopy()")
+			} else if outNeedsPtr {
+				c.Line("x := val.DeepCopy()")
+				c.Line("(*out)[key] = &x")
+			} else {
+				c.Line("(*out)[key] = *val.DeepCopy()")
+			}
+		case fineToShallowCopy(mapType.Elem()):
+			// just shallow copy types for which it's safe to do so
+			c.Line("(*out)[key] = val")
+		default:
+			// otherwise, we've got some kind-specific actions,
+			// based on the element's eventual type.
+
+			underlyingElem := eventualUnderlyingType(mapType.Elem())
+
+			// if it passes by reference, let the main switch handle it
+			if passesByReference(underlyingElem) {
+				c.Linef("var outVal %[1]s", c.syntaxFor(underlyingElem))
+				c.IfElse("val == nil", func() {
+					c.Line("(*out)[key] = nil")
+				}, func() {
+					c.Line("in, out := &val, &outVal")
+					c.genDeepCopyIntoBlock(c.syntaxFor(mapType.Elem()), mapType.Elem())
+				})
+				c.Line("(*out)[key] = outVal")
+
+				return
+			}
+
+			// otherwise...
+			switch underlyingElem := underlyingElem.(type) {
+			case *types.Struct:
+				// structs will have deepcopy generated for them, so use that
+				c.Line("(*out)[key] = *val.DeepCopy()")
+			default:
+				c.pkg.AddError(fmt.Errorf("invalid map value type %s", underlyingElem))
+				return
+			}
+		}
+	})
+}
+
+// genSliceDeepCopy generates DeepCopy code for the given named type whose
+// underlying type is the given slice.
+func (c *copyMethodMaker) genSliceDeepCopy(actualName string, sliceType *types.Slice) {
+	underlyingElem := eventualUnderlyingType(sliceType.Elem())
+
+	// make the actual type (not the underlying)
+	c.Linef("*out = make(%[1]s, len(*in))", actualName)
+
+	// check if we need to do anything special, or just copy each element appropriately
+	switch {
+	case hasAnyDeepCopyMethod(c.pkg, sliceType.Elem()):
+		// just use deepcopy if it's present (deepcopyinto will be filled in by our code)
+		c.For("i := range *in", func() {
+			c.Line("(*in)[i].DeepCopyInto(&(*out)[i])")
+		})
+	case fineToShallowCopy(underlyingElem):
+		// shallow copy if ok
+		c.Line("copy(*out, *in)")
+	default:
+		// copy each element appropriately
+		c.For("i := range *in", func() {
+			// fall back to normal code for reference types or those with custom logic
+			if passesByReference(underlyingElem) || hasAnyDeepCopyMethod(c.pkg, sliceType.Elem()) {
+				c.If("(*in)[i] != nil", func() {
+					c.Line("in, out := &(*in)[i], &(*out)[i]")
+					c.genDeepCopyIntoBlock(c.syntaxFor(sliceType.Elem()), sliceType.Elem())
+				})
+				return
+			}
+
+			switch underlyingElem.(type) {
+			case *types.Struct:
+				// structs will always have deepcopy
+				c.Linef("(*in)[i].DeepCopyInto(&(*out)[i])")
+			default:
+				c.pkg.AddError(fmt.Errorf("invalid slice element type %s", underlyingElem))
+			}
+		})
+	}
+}
+
+// genStructDeepCopy generates DeepCopy code for the given named type whose
+// underlying type is the given struct.
+func (c *copyMethodMaker) genStructDeepCopy(_ string, structType *types.Struct) {
+	c.Line("*out = *in")
+
+	for i := 0; i < structType.NumFields(); i++ {
+		field := structType.Field(i)
+
+		// if we have a manual deepcopy, use that
+		hasDeepCopy, copyOnPtr := hasDeepCopyMethod(c.pkg, field.Type())
+		hasDeepCopyInto := hasDeepCopyIntoMethod(c.pkg, field.Type())
+		if hasDeepCopyInto || hasDeepCopy {
+			_, outNeedsPtr := field.Type().(*types.Pointer)
+			inIsPtr := usePtrReceiver(field.Type())
+			if hasDeepCopy {
+				inIsPtr = copyOnPtr
+			}
+			if inIsPtr == outNeedsPtr {
+				c.Linef("out.%[1]s = in.%[1]s.DeepCopy()", field.Name())
+			} else if outNeedsPtr {
+				c.Linef("x := in.%[1]s.DeepCopy()", field.Name())
+				c.Linef("out.%[1]s = &x", field.Name())
+			} else {
+				c.Linef("in.%[1]s.DeepCopyInto(&out.%[1]s)", field.Name())
+			}
+			continue
+		}
+
+		// pass-by-reference fields get delegated to the main type
+		underlyingField := eventualUnderlyingType(field.Type())
+		if passesByReference(underlyingField) {
+			c.If(fmt.Sprintf("in.%s != nil", field.Name()), func() {
+				c.Linef("in, out := &in.%[1]s, &out.%[1]s", field.Name())
+				c.genDeepCopyIntoBlock(c.syntaxFor(field.Type()), field.Type())
+			})
+			continue
+		}
+
+		// otherwise...
+		switch underlyingField := underlyingField.(type) {
+		case *types.Basic:
+			// nothing to do, initial assignment copied this
+		case *types.Struct:
+			if fineToShallowCopy(field.Type()) {
+				c.Linef("out.%[1]s = in.%[1]s", field.Name())
+			} else {
+				c.Linef("in.%[1]s.DeepCopyInto(&out.%[1]s)", field.Name())
+			}
+		default:
+			c.pkg.AddError(fmt.Errorf("invalid field type %s", underlyingField))
+			return
+		}
+	}
+}
+
+// genPointerDeepCopy generates DeepCopy code for the given named type whose
+// underlying type is the given pointer.
+func (c *copyMethodMaker) genPointerDeepCopy(_ string, pointerType *types.Pointer) {
+	underlyingElem := eventualUnderlyingType(pointerType.Elem())
+
+	// if we have a manually written deepcopy, just use that
+	hasDeepCopy, copyOnPtr := hasDeepCopyMethod(c.pkg, pointerType.Elem())
+	hasDeepCopyInto := hasDeepCopyIntoMethod(c.pkg, pointerType.Elem())
+	if hasDeepCopyInto || hasDeepCopy {
+		outNeedsPtr := usePtrReceiver(pointerType.Elem())
+		if hasDeepCopy {
+			outNeedsPtr = copyOnPtr
+		}
+		if outNeedsPtr {
+			c.Line("*out = (*in).DeepCopy()")
+		} else {
+			c.Line("x := (*in).DeepCopy()")
+			c.Line("*out = &x")
+		}
+		return
+	}
+
+	// shallow-copiable types are pretty easy
+	if fineToShallowCopy(underlyingElem) {
+		c.Linef("*out = new(%[1]s)", c.syntaxFor(pointerType.Elem()))
+		c.Line("**out = **in")
+		return
+	}
+
+	// pass-by-reference types get delegated to the main switch
+	if passesByReference(underlyingElem) {
+		c.Linef("*out = new(%s)", c.syntaxFor(underlyingElem))
+		c.If("**in != nil", func() {
+			c.Line("in, out := *in, *out")
+			c.genDeepCopyIntoBlock(c.syntaxFor(underlyingElem), eventualUnderlyingType(underlyingElem))
+		})
+		return
+	}
+
+	// otherwise...
+	switch underlyingElem := underlyingElem.(type) {
+	case *types.Struct:
+		c.Linef("*out = new(%[1]s)", c.syntaxFor(pointerType.Elem()))
+		c.Line("(*in).DeepCopyInto(*out)")
+	default:
+		c.pkg.AddError(fmt.Errorf("invalid pointer element type %s", underlyingElem))
+		return
+	}
+}
+
+// syntaxFor returns the Go syntactic representation of the given type.
+func (c *copyMethodMaker) syntaxFor(typeInfo types.Type) string {
+	// NB(directxman12): typeInfo.String gets us most of the way there,
+	// but fails (for us) on named imports, since it uses the full package path.
+	switch typeInfo := typeInfo.(type) {
+	case *types.Named:
+		// register that we need an import for this type,
+		// so we can get the appropriate alias to use.
+		typeName := typeInfo.Obj()
+		otherPkg := typeName.Pkg()
+		if otherPkg == c.pkg.Types {
+			// local import
+			return typeName.Name()
+		}
+		alias := c.NeedImport(loader.NonVendorPath(otherPkg.Path()))
+		return alias + "." + typeName.Name()
+	case *types.Basic:
+		return typeInfo.String()
+	case *types.Pointer:
+		return "*" + c.syntaxFor(typeInfo.Elem())
+	case *types.Slice:
+		return "[]" + c.syntaxFor(typeInfo.Elem())
+	case *types.Map:
+		return fmt.Sprintf("map[%s]%s", c.syntaxFor(typeInfo.Key()), c.syntaxFor(typeInfo.Elem()))
+	default:
+		c.pkg.AddError(fmt.Errorf("name requested for invalid type %s", typeInfo))
+		return typeInfo.String()
+	}
+}
+
+// usePtrReceiver checks if we need a pointer receiver on methods for the given type.
+// Pass-by-reference types don't get pointer receivers.
+func usePtrReceiver(typeInfo types.Type) bool {
+	switch typeInfo.(type) {
+	case *types.Pointer:
+		return false
+	case *types.Map:
+		return false
+	case *types.Slice:
+		return false
+	case *types.Named:
+		return usePtrReceiver(typeInfo.Underlying())
+	default:
+		return true
+	}
+}
+
+// shouldBeCopied checks if we're supposed to make deepcopy methods for the given type.
+//
+// This is the case if it's exported *and* either:
+// - has a partial manual DeepCopy implementation (in which case we fill in the rest)
+// - aliases to a non-basic type eventually
+// - is a struct
+func shouldBeCopied(pkg *loader.Package, info *markers.TypeInfo) bool {
+	if !ast.IsExported(info.Name) {
+		return false
+	}
+
+	typeInfo := pkg.TypesInfo.TypeOf(info.RawSpec.Type)
+	if typeInfo == types.Typ[types.Invalid] {
+		pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", info.Name), info.RawSpec))
+		return false
+	}
+	var lastType types.Type
+	for underlyingType := typeInfo; underlyingType != lastType; lastType, underlyingType = underlyingType, underlyingType.Underlying() {
+		// aliases to other things besides basics need copy methods
+		// (basics can be straight-up shallow-copied)
+		if _, isBasic := underlyingType.(*types.Basic); !isBasic {
+			return true
+		}
+
+		// if it has a manual deepcopy or deepcopyinto, we're fine
+		if hasAnyDeepCopyMethod(pkg, underlyingType) {
+			return true
+		}
+	}
+
+	// structs are the only thing that's not a basic that's copiable by default
+	_, isStruct := lastType.(*types.Struct)
+	return isStruct
+}
+
+// hasDeepCopyMethod checks if this type has a manual DeepCopy method and if
+// the method has a pointer receiver.
+func hasDeepCopyMethod(pkg *loader.Package, typeInfo types.Type) (bool, bool) {
+	methods := types.NewMethodSet(typeInfo)
+	deepCopyMethod := methods.Lookup(pkg.Types, "DeepCopy")
+	if deepCopyMethod == nil {
+		return false, false
+	}
+
+	methodSig := deepCopyMethod.Type().(*types.Signature)
+	if methodSig.Params() != nil && methodSig.Params().Len() != 0 {
+		return false, false
+	}
+	if methodSig.Results() == nil || methodSig.Results().Len() != 1 {
+		return false, false
+	}
+	if methodSig.Results().At(0).Type() != methodSig.Recv().Type() {
+		return false, false
+	}
+
+	_, recvIsPtr := methodSig.Results().At(0).Type().(*types.Pointer)
+	return true, recvIsPtr
+}
+
+// hasDeepCopyIntoMethod checks if this type has a manual DeepCopyInto method.
+func hasDeepCopyIntoMethod(pkg *loader.Package, typeInfo types.Type) bool {
+	methods := types.NewMethodSet(typeInfo) // todo: recalculating this could be slow, keep across both invocations
+	deepCopyMethod := methods.Lookup(pkg.Types, "DeepCopyInto")
+	if deepCopyMethod == nil {
+		return false
+	}
+
+	methodSig := deepCopyMethod.Type().(*types.Signature)
+	if methodSig.Params() == nil || methodSig.Params().Len() != 1 {
+		return false
+	}
+	paramPtr, isPtr := methodSig.Params().At(0).Type().(*types.Pointer)
+	if !isPtr {
+		return false
+	}
+	if methodSig.Results() != nil && methodSig.Results().Len() != 0 {
+		return false
+	}
+
+	if recvPtr, recvIsPtr := methodSig.Recv().Type().(*types.Pointer); recvIsPtr {
+		return methodSig.Params().At(0).Type() == recvPtr
+	}
+	return methodSig.Params().At(0).Type() == paramPtr.Elem()
+}
+
+// hasAnyDeepCopyMethod checks if the given type has DeepCopy or DeepCopyInto
+// (either of which implies the other will exist eventually).
+func hasAnyDeepCopyMethod(pkg *loader.Package, typeInfo types.Type) bool {
+	hasDeepCopy, _ := hasDeepCopyMethod(pkg, typeInfo)
+	return hasDeepCopy || hasDeepCopyIntoMethod(pkg, typeInfo)
+}
+
+// eventualUnderlyingType gets the "final" type in a sequence of named aliases.
+// It's effectively a shortcut for calling Underlying in a loop.
+func eventualUnderlyingType(typeInfo types.Type) types.Type {
+	last := typeInfo
+	for underlying := typeInfo.Underlying(); underlying != last; last, underlying = underlying, underlying.Underlying() {
+		// get the actual underlying type
+	}
+	return last
+}
+
+// fineToShallowCopy checks if shallow-copying a type is equivalent to deepcopy-ing it.
+func fineToShallowCopy(typeInfo types.Type) bool {
+	switch typeInfo := typeInfo.(type) {
+	case *types.Basic:
+		// basic types (int, string, etc) are always fine to shallow-copy
+		return true
+	case *types.Named:
+		// aliases are fine to shallow-copy as long as they resolve to a shallow-copyable type
+		return fineToShallowCopy(typeInfo.Underlying())
+	case *types.Struct:
+		// structs are fine to shallow-copy if they have all shallow-copyable fields
+		for i := 0; i < typeInfo.NumFields(); i++ {
+			field := typeInfo.Field(i)
+			if !fineToShallowCopy(field.Type()) {
+				return false
+			}
+		}
+		return true
+	default:
+		return false
+	}
+}
+
+// passesByReference checks if the given type passes by reference
+// (except for interfaces, which are handled separately).
+func passesByReference(typeInfo types.Type) bool {
+	switch typeInfo.(type) {
+	case *types.Slice:
+		return true
+	case *types.Map:
+		return true
+	case *types.Pointer:
+		return true
+	default:
+		return false
+	}
+}
+
+var (
+	// ptrDeepCopy is a DeepCopy for a type with an existing DeepCopyInto and a pointer receiver.
+	ptrDeepCopy = `
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new %[1]s.
+func (in *%[1]s) DeepCopy() *%[1]s {
+	if in == nil { return nil }
+	out := new(%[1]s)
+	in.DeepCopyInto(out)
+	return out
+}
+`
+
+	// bareDeepCopy is a DeepCopy for a type with an existing DeepCopyInto and a non-pointer receiver.
+	bareDeepCopy = `
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new %[1]s.
+func (in %[1]s) DeepCopy() %[1]s {
+	if in == nil { return nil }
+	out := new(%[1]s)
+	in.DeepCopyInto(out)
+	return *out
+}
+`
+
+	// ptrDeepCopyObj is a DeepCopyObject for a type with an existing DeepCopyInto and a pointer receiver.
+	ptrDeepCopyObj = `
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *%[1]s) DeepCopyObject() %[2]s.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+`
+	// bareDeepCopyObj is a DeepCopyObject for a type with an existing DeepCopyInto and a non-pointer receiver.
+	bareDeepCopyObj = `
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in %[1]s) DeepCopyObject() %[2]s.Object {
+	return in.DeepCopy()
+}
+`
+)

From c989f191b4fa89051ec76a92b0696ca4140d8cd7 Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Fri, 26 Apr 2019 23:01:37 -0700
Subject: [PATCH 09/14] New controller-gen

This updates controller-gen to make use of the new generator framework.
Generators and output rules are treated as markers to parse command-line
options.
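To illustrate the mechanism (a rough sketch only; the option string, the
registry, and the types are taken from the code below, and error handling
is elided), an option like `output:crd:dir=/tmp/crds` is handled by
prefixing it with "+" and running it through the same registry machinery
used for source-level markers:

    rawOpt := "+" + "output:crd:dir=/tmp/crds"
    // look the option up as if it were a package-level marker
    defn := optionsRegistry.Lookup(rawOpt, markers.DescribesPackage)
    val, _ := defn.Parse(rawOpt)
    // per-generator output options parse into output rules,
    // here one that writes CRD output under /tmp/crds
    rule := val.(genall.OutputRule)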
---
 cmd/controller-gen/main.go | 445 ++++++++++++++++++++++++-------------
 1 file changed, 290 insertions(+), 155 deletions(-)

diff --git a/cmd/controller-gen/main.go b/cmd/controller-gen/main.go
index 74c29d21d..82b33c41c 100644
--- a/cmd/controller-gen/main.go
+++ b/cmd/controller-gen/main.go
@@ -17,197 +17,332 @@ package main

 import (
 	"fmt"
-	"log"
 	"os"
-	"path/filepath"
+	"sort"
+	"strings"

 	"github.com/spf13/cobra"

-	crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator"
+	"sigs.k8s.io/controller-tools/pkg/crd"
+	"sigs.k8s.io/controller-tools/pkg/deepcopy"
+	"sigs.k8s.io/controller-tools/pkg/genall"
+	"sigs.k8s.io/controller-tools/pkg/markers"
 	"sigs.k8s.io/controller-tools/pkg/rbac"
-	"sigs.k8s.io/controller-tools/pkg/webhook"
 )

-func main() {
-	rootCmd := &cobra.Command{
-		Use:   "controller-gen",
-		Short: "A reference implementation generation tool for Kubernetes APIs.",
-		Long:  `A reference implementation generation tool for Kubernetes APIs.`,
-		Example: `	# Generate RBAC manifests for a project
-	controller-gen rbac
-
-	# Generate CRD manifests for a project
-	controller-gen crd
+// Options are specified to controller-gen by turning generators and output rules into
+// markers, and then parsing them using the standard registry logic (without the "+").
+// Each marker and output rule should thus be usable as a marker target.

-	# Run all the generators for a given project
-	controller-gen all
-`,
+var (
+	// allGenerators maintains the list of all known generators, giving
+	// them names for use on the command line.
+	// each turns into a command line option,
+	// and has options for output forms.
+	allGenerators = map[string]genall.Generator{
+		"crd":    crd.Generator{},
+		"rbac":   rbac.Generator{},
+		"object": deepcopy.Generator{},
 	}
-	rootCmd.AddCommand(
-		newRBACCmd(),
-		newCRDCmd(),
-		newWebhookCmd(),
-		newAllSubCmd(),
-	)

+	// allOutputRules defines the list of all known output rules, giving
+	// them names for use on the command line.
+	// Each output rule turns into two command line options:
+	// - output:<generator>:<rule> (per-generator output)
+	// - output:<rule> (default output)
+	allOutputRules = map[string]genall.OutputRule{
+		"dir":       genall.OutputToDirectory(""),
+		"none":      genall.OutputToNothing,
+		"stdout":    genall.OutputToStdout,
+		"artifacts": genall.OutputArtifacts{},
+	}

-	if err := rootCmd.Execute(); err != nil {
-		fmt.Println(err)
-		os.Exit(1)
+	// optionsRegistry contains all the marker definitions used to process command line options
+	optionsRegistry = &markers.Registry{}
+)
+
+// rootPaths is the marker value for the "paths" option
+type rootPaths []string
+
+func init() {
+	for genName, gen := range allGenerators {
+		// make the generator options marker itself
+		if err := optionsRegistry.Define(genName, markers.DescribesPackage, gen); err != nil {
+			panic(err)
+		}
+
+		// make per-generation output rule markers
+		for ruleName, rule := range allOutputRules {
+			if err := optionsRegistry.Define(fmt.Sprintf("output:%s:%s", genName, ruleName), markers.DescribesPackage, rule); err != nil {
+				panic(err)
+			}
+		}
+	}
+
+	// make "default output" output rule markers
+	for ruleName, rule := range allOutputRules {
+		if err := optionsRegistry.Define("output:"+ruleName, markers.DescribesPackage, rule); err != nil {
+			panic(err)
+		}
+	}
+
+	// make the "paths" option
+	if err := optionsRegistry.Define("paths", markers.DescribesPackage, rootPaths(nil)); err != nil {
+		panic(err)
+	}
 }

-func newRBACCmd() *cobra.Command {
-	o := &rbac.ManifestOptions{}
-	o.SetDefaults()
+// fieldHelp prints the help for a particular marker argument.
+func fieldHelp(name string, arg markers.Argument, first bool, out *strings.Builder) {
+	if arg.Optional {
+		out.WriteString("[")
+	}

-	cmd := &cobra.Command{
-		Use:   "rbac",
-		Short: "Generates RBAC manifests",
-		Long: `Generate RBAC manifests from the RBAC annotations in Go source files.
-Usage:
-# controller-gen rbac [--name manager] [--input-dir input_dir] [--output-dir output_dir]
-`,
-		Run: func(_ *cobra.Command, _ []string) {
-			if err := rbac.Generate(o); err != nil {
-				log.Fatal(err)
-			}
-			fmt.Printf("RBAC manifests generated under '%s' directory\n", o.OutputDir)
-		},
+	if !first {
+		out.WriteRune(',')
+	} else if name != "" {
+		out.WriteRune(':')
+	}
+
+	if name != "" {
+		out.WriteString(name)
+	}
+	out.WriteRune('=')
+
+	out.WriteRune('<')
+	out.WriteString(arg.TypeString())
+	out.WriteRune('>')
+
+	if arg.Optional {
+		out.WriteString("]")
 	}
+}

-	f := cmd.Flags()
-	f.StringVar(&o.Name, "name", o.Name, "name to be used as prefix in identifier for manifests")
-	f.StringVar(&o.ServiceAccount, "service-account", o.ServiceAccount, "service account to bind the role to")
-	f.StringVar(&o.Namespace, "service-account-namespace", o.Namespace, "namespace of the service account to bind the role to")
-	f.StringVar(&o.InputDir, "input-dir", o.InputDir, "input directory pointing to Go source files")
-	f.StringVar(&o.OutputDir, "output-dir", o.OutputDir, "output directory where generated manifests will be saved")
-	f.StringVar(&o.RoleFile, "role-file", o.RoleFile, "output file for the role manifest")
-	f.StringVar(&o.BindingFile, "binding-file", o.BindingFile, "output file for the role binding manifest")

-	return cmd
+// optionHelp assembles help for a given named option (specified via marker).
+func optionHelp(def *markers.Definition, out *strings.Builder) {
+	out.WriteString(def.Name)
+	if def.Empty() {
+		return
+	}

+	first := true
+	for argName, arg := range def.Fields {
+		fieldHelp(argName, arg, first, out)
+		first = false
+	}
+}
+
+// allOptionsHelp documents all accepted options
+func allOptionsHelp(reg *markers.Registry, descTarget bool, sortFunc func(string, string) bool) string {
+	out := &strings.Builder{}
+	allMarkers := reg.AllDefinitions()
+	sort.Slice(allMarkers, func(i, j int) bool {
+		// sort grouping marker options together
+		iName, jName := allMarkers[i].Name, allMarkers[j].Name
+		return sortFunc(iName, jName)
+	})
+	for _, def := range allMarkers {
+		out.WriteString("  ")
+		optionHelp(def, out)
+		if descTarget {
+			switch def.Target {
+			case markers.DescribesPackage:
+				out.WriteString(" (package)")
+			case markers.DescribesType:
+				out.WriteString(" (type)")
+			case markers.DescribesField:
+				out.WriteString(" (struct field)")
+			}
+		}
+		out.WriteRune('\n')
+	}
+	return out.String()
+}

-func newCRDCmd() *cobra.Command {
-	g := &crdgenerator.Generator{}
+func compOptionsForSort(iName, jName string) bool {
+	iParts := strings.Split(iName, ":")
+	jParts := strings.Split(jName, ":")
+	iGen := ""
+	iRule := ""
+	jGen := ""
+	jRule := ""
+
+	switch len(iParts) {
+	case 1:
+		iGen = iParts[0]
+	// two means a default output rule, so ignore
+	case 2:
+		iRule = iParts[1]
+	case 3:
+		iGen = iParts[1]
+		iRule = iParts[2]
+	}
+	switch len(jParts) {
+	case 1:
+		jGen = jParts[0]
+	// two means a default output rule, so ignore
+	case 2:
+		jRule = jParts[1]
+	case 3:
+		jGen = jParts[1]
+		jRule = jParts[2]
+	}
+
+	if iGen != jGen {
+		return iGen > jGen
+	}
+
+	return iRule < jRule
+}
+
+func main() {
+	printMarkersOnly := false

 	cmd := &cobra.Command{
-		Use:   "crd",
-		Short: "Generates CRD manifests",
-		Long: `Generate CRD manifests from the Type definitions in Go source files.
-Usage:
-# controller-gen crd [--domain k8s.io] [--root-path input_dir] [--output-dir output_dir]
+		Use:   "controller-gen",
+		Short: "Generate Kubernetes API extension resources and code.",
+		Long:  "Generate Kubernetes API extension resources and code.",
+		Example: `	# Generate RBAC manifests and crds for all types under apis/,
+	# outputting crds to /tmp/crds and everything else to stdout
+	controller-gen rbac crd paths=./apis/... output:crd:dir=/tmp/crds output:stdout
+
+	# Generate deepcopy implementations for a particular file
+	controller-gen object paths=./apis/v1beta1/some_types.go
+
+	# Run all the generators for a given project
+	controller-gen paths=./apis/...
+`,
+		RunE: func(_ *cobra.Command, rawOpts []string) error {
+			return runGenerators(rawOpts, printMarkersOnly)
+		},
+	}
+	cmd.Flags().BoolVarP(&printMarkersOnly, "which-markers", "w", false, "print out all markers available with the requested generators")
+	cmd.SetUsageTemplate(cmd.UsageTemplate() + "\n\nOptions\n\n" + allOptionsHelp(optionsRegistry, false, compOptionsForSort))

-	f := cmd.Flags()
-	f.StringVar(&g.RootPath, "root-path", "", "working dir, must have PROJECT file under the path or parent path if domain not set")
-	f.StringVar(&g.OutputDir, "output-dir", "", "output directory, default to 'config/crds' under root path")
-	f.StringVar(&g.Domain, "domain", "", "domain of the resources, will try to fetch it from PROJECT file if not specified")
-	f.StringVar(&g.Namespace, "namespace", "", "CRD namespace, treat it as cluster scoped if not set")
-	f.BoolVar(&g.SkipMapValidation, "skip-map-validation", true, "if set to true, skip generating OpenAPI validation schema for map type in CRD.")
-	f.StringVar(&g.APIsPath, "apis-path", "pkg/apis", "the path to search for apis relative to the current directory")
-	f.StringVar(&g.APIsPkg, "apis-pkg", "", "the absolute Go pkg name for current project's api pkg.")
+	if err := cmd.Execute(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}

-	return cmd
+// splitOutputRuleOption splits a marker name of "output:<gen>:<rule>" or "output:<rule>"
+// into its component rule and generator name.
+func splitOutputRuleOption(name string) (ruleName string, genName string) {
+	parts := strings.SplitN(name, ":", 3)
+	if len(parts) == 3 {
+		// output:<gen>:<rule>
+		return parts[2], parts[1]
+	}
+	// output:<rule>
+	return parts[1], ""
+}

-func newAllSubCmd() *cobra.Command {
-	var (
-		projectDir, namespace string
-	)
+// parseOptions parses out options as markers into generators and output rules
+// (the only current types of options).
+func parseOptions(options []string) (genall.Generators, genall.OutputRules, []string, error) {
+	var gens genall.Generators
+	rules := genall.OutputRules{
+		ByGenerator: make(map[genall.Generator]genall.OutputRule),
+	}
+	var paths []string

-	cmd := &cobra.Command{
-		Use:   "all",
-		Short: "runs all generators for a project",
-		Long: `Run all available generators for a given project
-Usage:
-# controller-gen all
-`,
-		Run: func(_ *cobra.Command, _ []string) {
-			if projectDir == "" {
-				currDir, err := os.Getwd()
-				if err != nil {
-					log.Fatalf("project-dir missing, failed to use current directory: %v", err)
-				}
-				projectDir = currDir
-			}
-			crdGen := &crdgenerator.Generator{
-				RootPath:          projectDir,
-				OutputDir:         filepath.Join(projectDir, "config", "crds"),
-				Namespace:         namespace,
-				SkipMapValidation: true,
-			}
-			if err := crdGen.ValidateAndInitFields(); err != nil {
-				log.Fatal(err)
-			}
-			if err := crdGen.Do(); err != nil {
-				log.Fatal(err)
-			}
-			fmt.Printf("CRD manifests generated under '%s' \n", crdGen.OutputDir)
+	// collect the generators first, so that we can key the output on the actual
+	// generator, which matters if there are settings in the gen object and it's not a pointer.
+ outputByGen := make(map[string]genall.OutputRule) + gensByName := make(map[string]genall.Generator) - // RBAC generation - rbacOptions := &rbac.ManifestOptions{ - InputDir: filepath.Join(projectDir, "pkg"), - OutputDir: filepath.Join(projectDir, "config", "rbac"), - Name: "manager", - } - if err := rbac.Generate(rbacOptions); err != nil { - log.Fatal(err) - } - fmt.Printf("RBAC manifests generated under '%s' \n", rbacOptions.OutputDir) - - o := &webhook.Options{ - WriterOptions: webhook.WriterOptions{ - InputDir: filepath.Join(projectDir, "pkg"), - OutputDir: filepath.Join(projectDir, "config", "webhook"), - PatchOutputDir: filepath.Join(projectDir, "config", "default"), - }, - } - o.SetDefaults() - if err := webhook.Generate(o); err != nil { - log.Fatal(err) + for _, rawOpt := range options { + rawOpt = "+" + rawOpt // add a `+` to make it acceptable for usage with the registry + defn := optionsRegistry.Lookup(rawOpt, markers.DescribesPackage) + if defn == nil { + return nil, genall.OutputRules{}, nil, fmt.Errorf("unknown option %q", rawOpt[1:]) + } + + val, err := defn.Parse(rawOpt) + if err != nil { + return nil, genall.OutputRules{}, nil, fmt.Errorf("unable to parse option %q: %v", rawOpt[1:], err) + } + + switch val := val.(type) { + case genall.Generator: + gens = append(gens, val) + gensByName[defn.Name] = val + case genall.OutputRule: + _, genName := splitOutputRuleOption(defn.Name) + if genName == "" { + // it's a default rule + rules.Default = val + continue } - fmt.Printf("webhook manifests generated under '%s' directory\n", o.OutputDir) - }, + + outputByGen[genName] = val + continue + case rootPaths: + paths = val + default: + return nil, genall.OutputRules{}, nil, fmt.Errorf("unknown option marker %q", defn.Name) + } + } + + // if no generators were specified, we want all of them + if len(gens) == 0 { + for genName, gen := range allGenerators { + gens = append(gens, gen) + gensByName[genName] = gen + } + } + + // actually associate the rules now that we know the generators + for genName, outputRule := range outputByGen { + gen, knownGen := gensByName[genName] + if !knownGen { + return nil, genall.OutputRules{}, nil, fmt.Errorf("non-invoked generator %q", genName) + } + + rules.ByGenerator[gen] = outputRule + } + + // attempt to figure out what the user wants without a lot of verbose specificity: + // if the user specifies a default rule, assume that they probably want to fall back + // to that. Otherwise, assume that they just wanted to customize one option from the + // set, and leave the rest in the standard configuration. + outRules := genall.DirectoryPerGenerator("config", gensByName) + if rules.Default != nil { + return gens, rules, paths, nil + } + + for gen, rule := range rules.ByGenerator { + outRules.ByGenerator[gen] = rule } - f := cmd.Flags() - f.StringVar(&projectDir, "project-dir", "", "project directory, it must have PROJECT file") - f.StringVar(&namespace, "namespace", "", "CRD namespace, treat it as cluster scoped if not set") - return cmd + + return gens, outRules, paths, nil } -func newWebhookCmd() *cobra.Command { - o := &webhook.Options{} - o.SetDefaults() +func runGenerators(rawOptions []string, printMarkersOnly bool) error { + gens, outputRules, rootPaths, err := parseOptions(rawOptions) + if err != nil { - cmd := &cobra.Command{ - Use: "webhook", - Short: "Generates webhook related manifests", - Long: `Generate webhook related manifests from the webhook annotations in Go source files. 
-Usage:
-# controller-gen webhook [--input-dir input_dir] [--output-dir output_dir] [--patch-output-dir patch-output_dir]
-`,
-		Run: func(_ *cobra.Command, _ []string) {
-			if err := webhook.Generate(o); err != nil {
-				log.Fatal(err)
-			}
-			fmt.Printf("webhook manifests generated under '%s' directory\n", o.OutputDir)
-		},
+		return err
 	}

-	f := cmd.Flags()
-	f.StringVar(&o.InputDir, "input-dir", o.InputDir, "input directory pointing to Go source files")
-	f.StringVar(&o.OutputDir, "output-dir", o.OutputDir, "output directory where generated manifests will be saved.")
-	f.StringVar(&o.PatchOutputDir, "patch-output-dir", o.PatchOutputDir, "output directory where generated kustomize patch will be saved.")
+	if printMarkersOnly {
+		reg := &markers.Registry{}
+		if err := gens.RegisterMarkers(reg); err != nil {
+			return err
+		}
+		sortFunc := func(iName string, jName string) bool { return iName < jName }
+		fmt.Println(allOptionsHelp(reg, true, sortFunc))
+		return nil
+	}
+
+	rt, err := gens.ForRoots(rootPaths...)
+	if err != nil {
+		return err
+	}
+	rt.OutputRules = outputRules
+
+	if hadErrs := rt.Run(); hadErrs {
+		return fmt.Errorf("not all generators ran successfully")
+	}

-	return cmd
+	return nil
 }

From 3a313d48ad4ef0c54a50ef30015ce0f322fd8dc6 Mon Sep 17 00:00:00 2001
From: Solly Ross
Date: Wed, 1 May 2019 22:39:18 -0700
Subject: [PATCH 10/14] Webhook Generation

This adds WebhookConfig generation (as per our existing webhook
markers, with slight modifications) on top of the new generator
framework.
---
 cmd/controller-gen/main.go             |   2 +
 pkg/webhook/admission.go               |  89 ------
 pkg/webhook/generator.go               | 334 -------------------------
 pkg/webhook/generator_test.go          | 160 ------------
 pkg/webhook/manifests.go               | 151 -----------
 pkg/webhook/manifests_test.go          | 113 ---------
 pkg/webhook/parser.go                  | 314 +++++++++--------------
 pkg/webhook/parser_test.go             | 171 -------------
 pkg/webhook/testdata/input/server.go   |  16 --
 pkg/webhook/testdata/input/webhooks.go |  24 --
 pkg/webhook/types.go                   |  38 ---
 pkg/webhook/writer.go                  |  92 -------
 12 files changed, 125 insertions(+), 1379 deletions(-)
 delete mode 100644 pkg/webhook/admission.go
 delete mode 100644 pkg/webhook/generator.go
 delete mode 100644 pkg/webhook/generator_test.go
 delete mode 100644 pkg/webhook/manifests.go
 delete mode 100644 pkg/webhook/manifests_test.go
 delete mode 100644 pkg/webhook/parser_test.go
 delete mode 100644 pkg/webhook/testdata/input/server.go
 delete mode 100644 pkg/webhook/testdata/input/webhooks.go
 delete mode 100644 pkg/webhook/types.go
 delete mode 100644 pkg/webhook/writer.go

diff --git a/cmd/controller-gen/main.go b/cmd/controller-gen/main.go
index 82b33c41c..c0fab74bc 100644
--- a/cmd/controller-gen/main.go
+++ b/cmd/controller-gen/main.go
@@ -27,6 +27,7 @@ import (
 	"sigs.k8s.io/controller-tools/pkg/genall"
 	"sigs.k8s.io/controller-tools/pkg/markers"
 	"sigs.k8s.io/controller-tools/pkg/rbac"
+	"sigs.k8s.io/controller-tools/pkg/webhook"
 )

 // Options are specified to controller-gen by turning generators and output rules into
@@ -42,6 +43,7 @@ var (
 		"crd":    crd.Generator{},
 		"rbac":   rbac.Generator{},
 		"object": deepcopy.Generator{},
+		"webhook": webhook.Generator{},
 	}

 	// allOutputRules defines the list of all known output rules, giving
diff --git a/pkg/webhook/admission.go b/pkg/webhook/admission.go
deleted file mode 100644
index 13bd1bca4..000000000
--- a/pkg/webhook/admission.go
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "errors" - "fmt" - "regexp" - "strings" - - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// admissionWebhook contains bits needed for generating a admissionWebhook Configuration -type admissionWebhook struct { - // name is the name of the webhook - name string - // typ is the webhook type, i.e. mutating, validating - typ webhookType - // path is the path this webhook will serve. - path string - // rules maps to the rules field in admissionregistrationv1beta1.admissionWebhook - rules []admissionregistrationv1beta1.RuleWithOperations - // failurePolicy maps to the failurePolicy field in admissionregistrationv1beta1.admissionWebhook - // This optional. If not set, will be defaulted to Ignore (fail-open) by the server. - // More details: https://github.com/kubernetes/api/blob/f5c295feaba2cbc946f0bbb8b535fc5f6a0345ee/admissionregistration/v1beta1/types.go#L144-L147 - failurePolicy *admissionregistrationv1beta1.FailurePolicyType - // namespaceSelector maps to the namespaceSelector field in admissionregistrationv1beta1.admissionWebhook - // This optional. - namespaceSelector *metav1.LabelSelector -} - -func (w *admissionWebhook) setDefaults() { - if len(w.path) == 0 { - if len(w.rules) == 0 || len(w.rules[0].Resources) == 0 { - // can't do defaulting, skip it. - return - } - if w.typ == mutatingWebhook { - w.path = "/mutate-" + w.rules[0].Resources[0] - } else if w.typ == validatingWebhook { - w.path = "/validate-" + w.rules[0].Resources[0] - } - } - if len(w.name) == 0 { - reg := regexp.MustCompile("[^a-zA-Z0-9]+") - processedPath := strings.ToLower(reg.ReplaceAllString(w.path, "")) - w.name = processedPath + ".example.com" - } -} - -var _ webhook = &admissionWebhook{} - -// GetType returns the type of the webhook. -func (w *admissionWebhook) GetType() webhookType { - return w.typ -} - -// Validate validates if the webhook is valid. -func (w *admissionWebhook) Validate() error { - if len(w.rules) == 0 { - return errors.New("field rules should not be empty") - } - if len(w.name) == 0 { - return errors.New("field name should not be empty") - } - if w.typ != mutatingWebhook && w.typ != validatingWebhook { - return fmt.Errorf("unsupported Type: %v, only mutatingWebhook and validatingWebhook are supported", w.typ) - } - if len(w.path) == 0 { - return errors.New("field path should not be empty") - } - return nil -} diff --git a/pkg/webhook/generator.go b/pkg/webhook/generator.go deleted file mode 100644 index 838ab6c68..000000000 --- a/pkg/webhook/generator.go +++ /dev/null @@ -1,334 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "errors" - "net" - "net/url" - "path" - "sort" - "strconv" - - "k8s.io/api/admissionregistration/v1beta1" - admissionregistration "k8s.io/api/admissionregistration/v1beta1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - apitypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -type generatorOptions struct { - // webhooks maps a path to a webhoook. - webhooks map[string]webhook - - // port is the port number that the server will serve. - // It will be defaulted to 443 if unspecified. - port int32 - - // certDir is the directory that contains the server key and certificate. - certDir string - - // mutatingWebhookConfigName is the name that used for creating the MutatingWebhookConfiguration object. - mutatingWebhookConfigName string - // validatingWebhookConfigName is the name that used for creating the ValidatingWebhookConfiguration object. - validatingWebhookConfigName string - - // secret is the location for storing the certificate for the admission server. - // The server should have permission to create a secret in the namespace. - secret *apitypes.NamespacedName - - // service is a k8s service fronting the webhook server pod(s). - // One and only one of service and host can be set. - // This maps to field .Webhooks.ClientConfig.Service - // https://github.com/kubernetes/api/blob/183f3326a9353bd6d41430fc80f96259331d029c/admissionregistration/v1beta1/types.go#L260 - service *service - // host is the host name of .Webhooks.ClientConfig.URL - // https://github.com/kubernetes/api/blob/183f3326a9353bd6d41430fc80f96259331d029c/admissionregistration/v1beta1/types.go#L250 - // One and only one of service and host can be set. - // If neither service nor host is unspecified, host will be defaulted to "localhost". - host *string -} - -// service contains information for creating a Service -type service struct { - // name of the Service - name string - // namespace of the Service - namespace string - // selectors is the selector of the Service. - // This must select the pods that runs this webhook server. - selectors map[string]string -} - -// setDefaults does defaulting for the generatorOptions. -func (o *generatorOptions) setDefaults() { - if o.webhooks == nil { - o.webhooks = map[string]webhook{} - } - if o.port <= 0 { - o.port = 443 - } - if len(o.certDir) == 0 { - o.certDir = path.Join("/tmp", "k8s-webhook-server", "serving-certs") - } - - if len(o.mutatingWebhookConfigName) == 0 { - o.mutatingWebhookConfigName = "mutating-webhook-configuration" - } - if len(o.validatingWebhookConfigName) == 0 { - o.validatingWebhookConfigName = "validating-webhook-configuration" - } - if o.host == nil && o.service == nil { - varString := "localhost" - o.host = &varString - } -} - -// Generate creates the AdmissionWebhookConfiguration objects and Service if any. -// It also provisions the certificate for the admission server. 
-func (o *generatorOptions) Generate() ([]runtime.Object, error) { - // do defaulting if necessary - o.setDefaults() - - webhookConfigurations, err := o.whConfigs() - if err != nil { - return nil, err - } - svc := o.getService() - objects := append(webhookConfigurations, svc) - - return objects, nil -} - -// whConfigs creates a mutatingWebhookConfiguration and(or) a validatingWebhookConfiguration. -func (o *generatorOptions) whConfigs() ([]runtime.Object, error) { - for _, webhook := range o.webhooks { - if err := webhook.Validate(); err != nil { - return nil, err - } - } - - objs := []runtime.Object{} - mutatingWH, err := o.mutatingWHConfig() - if err != nil { - return nil, err - } - if mutatingWH != nil { - objs = append(objs, mutatingWH) - } - validatingWH, err := o.validatingWHConfigs() - if err != nil { - return nil, err - } - if validatingWH != nil { - objs = append(objs, validatingWH) - } - return objs, nil -} - -// mutatingWHConfig creates mutatingWebhookConfiguration. -func (o *generatorOptions) mutatingWHConfig() (runtime.Object, error) { - mutatingWebhooks := []v1beta1.Webhook{} - for path, webhook := range o.webhooks { - if webhook.GetType() != mutatingWebhook { - continue - } - - aw := webhook.(*admissionWebhook) - wh, err := o.admissionWebhook(path, aw) - if err != nil { - return nil, err - } - mutatingWebhooks = append(mutatingWebhooks, *wh) - } - - sort.Slice(mutatingWebhooks, func(i, j int) bool { - return mutatingWebhooks[i].Name < mutatingWebhooks[j].Name - }) - - if len(mutatingWebhooks) > 0 { - return &admissionregistration.MutatingWebhookConfiguration{ - TypeMeta: metav1.TypeMeta{ - APIVersion: metav1.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}.String(), - Kind: "MutatingWebhookConfiguration", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.mutatingWebhookConfigName, - Annotations: map[string]string{ - // TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use. - "alpha.admissionwebhook.cert-manager.io": "true", - }, - }, - Webhooks: mutatingWebhooks, - }, nil - } - return nil, nil -} - -func (o *generatorOptions) validatingWHConfigs() (runtime.Object, error) { - validatingWebhooks := []v1beta1.Webhook{} - for path, webhook := range o.webhooks { - var aw *admissionWebhook - if webhook.GetType() != validatingWebhook { - continue - } - - aw = webhook.(*admissionWebhook) - wh, err := o.admissionWebhook(path, aw) - if err != nil { - return nil, err - } - validatingWebhooks = append(validatingWebhooks, *wh) - } - - sort.Slice(validatingWebhooks, func(i, j int) bool { - return validatingWebhooks[i].Name < validatingWebhooks[j].Name - }) - - if len(validatingWebhooks) > 0 { - return &admissionregistration.ValidatingWebhookConfiguration{ - TypeMeta: metav1.TypeMeta{ - APIVersion: metav1.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}.String(), - Kind: "ValidatingWebhookConfiguration", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.validatingWebhookConfigName, - Annotations: map[string]string{ - // TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use. 
- "alpha.admissionwebhook.cert-manager.io": "true", - }, - }, - Webhooks: validatingWebhooks, - }, nil - } - return nil, nil -} - -func (o *generatorOptions) admissionWebhook(path string, wh *admissionWebhook) (*admissionregistration.Webhook, error) { - if wh.namespaceSelector == nil && o.service != nil && len(o.service.namespace) > 0 { - wh.namespaceSelector = &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "control-plane", - Operator: metav1.LabelSelectorOpDoesNotExist, - }, - }, - } - } - - webhook := &admissionregistration.Webhook{ - Name: wh.name, - Rules: wh.rules, - FailurePolicy: wh.failurePolicy, - NamespaceSelector: wh.namespaceSelector, - } - cc, err := o.getClientConfigWithPath(path) - if err != nil { - return nil, err - } - webhook.ClientConfig = *cc - return webhook, nil -} - -// getClientConfigWithPath constructs a WebhookClientConfig based on the server generatorOptions. -// It will use path to the set the path in WebhookClientConfig. -func (o *generatorOptions) getClientConfigWithPath(path string) (*admissionregistration.WebhookClientConfig, error) { - cc, err := o.getClientConfig() - if err != nil { - return nil, err - } - return cc, setPath(cc, path) -} - -func (o *generatorOptions) getClientConfig() (*admissionregistration.WebhookClientConfig, error) { - if o.host != nil && o.service != nil { - return nil, errors.New("URL and service can't be set at the same time") - } - cc := &admissionregistration.WebhookClientConfig{ - // Put an non-empty and not harmful CABundle here. - // Not doing this will cause the field - CABundle: []byte(`\n`), - } - if o.host != nil { - u := url.URL{ - Scheme: "https", - Host: net.JoinHostPort(*o.host, strconv.Itoa(int(o.port))), - } - urlString := u.String() - cc.URL = &urlString - } - if o.service != nil { - cc.Service = &admissionregistration.ServiceReference{ - Name: o.service.name, - Namespace: o.service.namespace, - // Path will be set later - } - } - return cc, nil -} - -// setPath sets the path in the WebhookClientConfig. -func setPath(cc *admissionregistration.WebhookClientConfig, path string) error { - if cc.URL != nil { - u, err := url.Parse(*cc.URL) - if err != nil { - return err - } - u.Path = path - urlString := u.String() - cc.URL = &urlString - } - if cc.Service != nil { - cc.Service.Path = &path - } - return nil -} - -// getService creates a corev1.Service object fronting the admission server. -func (o *generatorOptions) getService() runtime.Object { - if o.service == nil { - return nil - } - svc := &corev1.Service{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.service.name, - Namespace: o.service.namespace, - Annotations: map[string]string{ - // Secret here only need name, since it will be in the same namespace as the service. - // TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use. - "alpha.service.cert-manager.io/serving-cert-secret-name": o.secret.Name, - }, - }, - Spec: corev1.ServiceSpec{ - Selector: o.service.selectors, - Ports: []corev1.ServicePort{ - { - // When using service, kube-apiserver will send admission request to port 443. 
- Port: 443, - TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: o.port}, - }, - }, - }, - } - return svc -} diff --git a/pkg/webhook/generator_test.go b/pkg/webhook/generator_test.go deleted file mode 100644 index fb9d790f3..000000000 --- a/pkg/webhook/generator_test.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "reflect" - "testing" - - "github.com/spf13/afero" -) - -var expected = map[string]string{ - "config/webhook/webhookmanifests.yaml": `apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - annotations: - alpha.admissionwebhook.cert-manager.io: "true" - creationTimestamp: null - name: test-mutating-webhook-cfg -webhooks: -- clientConfig: - caBundle: XG4= - service: - name: webhook-service - namespace: test-system - path: /bar - failurePolicy: Fail - name: bar-webhook - namespaceSelector: - matchExpressions: - - key: control-plane - operator: DoesNotExist - rules: - - apiGroups: - - apps - operations: - - CREATE - - UPDATE - resources: - - deployments ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - annotations: - alpha.admissionwebhook.cert-manager.io: "true" - creationTimestamp: null - name: test-validating-webhook-cfg -webhooks: -- clientConfig: - caBundle: XG4= - service: - name: webhook-service - namespace: test-system - path: /baz - failurePolicy: Ignore - name: baz-webhook - namespaceSelector: - matchExpressions: - - key: control-plane - operator: DoesNotExist - rules: - - apiGroups: - - crew - apiVersions: - - v1 - operations: - - DELETE - resources: - - firstmates ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - alpha.service.cert-manager.io/serving-cert-secret-name: webhook-secret - creationTimestamp: null - name: webhook-service - namespace: test-system -spec: - ports: - - port: 443 - targetPort: 7890 - selector: - app: webhook-server -status: - loadBalancer: {} -`, - "config/default/manager_patch.yaml": `apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: controller-manager -spec: - template: - metadata: - labels: - app: webhook-server - spec: - containers: - - name: manager - ports: - - containerPort: 7890 - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: /tmp/test-cert - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: webhook-secret -`, -} - -func TestGenerator(t *testing.T) { - fs := afero.NewMemMapFs() - - o := &Options{ - WriterOptions: WriterOptions{ - InputDir: "./testdata/input", - outFs: fs, - }, - } - o.SetDefaults() - - if err := Generate(o); err != nil { - t.Fatalf("error when generating the files: %v", err) - } - - for name, content := range expected { - got := make([]byte, 2048) - f1, err := fs.Open(name) - if err != nil { - t.Fatalf("error when opening generated file %s: %v", name, err) - } - n, err := f1.Read(got) - if err != nil { - t.Fatalf("error when reading from 
generated file %s: %v", name, err) - } - if !reflect.DeepEqual([]byte(content), got[:n]) { - t.Fatalf("expected: %v, but got: %v", content, string(got[:n])) - } - } -} diff --git a/pkg/webhook/manifests.go b/pkg/webhook/manifests.go deleted file mode 100644 index 3bea5853e..000000000 --- a/pkg/webhook/manifests.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "bytes" - "fmt" - "path" - "strings" - "text/template" - - "github.com/ghodss/yaml" - "github.com/spf13/afero" - - "sigs.k8s.io/controller-tools/pkg/internal/general" -) - -// Options represent options for generating the webhook manifests. -type Options struct { - // WriterOptions specifies the input and output - WriterOptions - - generatorOptions -} - -// Generate generates RBAC manifests by parsing the RBAC annotations in Go source -// files specified in the input directory. -func Generate(o *Options) error { - if err := o.WriterOptions.Validate(); err != nil { - return err - } - - err := general.ParseDir(o.InputDir, o.parseAnnotation) - if err != nil { - return fmt.Errorf("failed to parse the input dir: %v", err) - } - - if len(o.webhooks) == 0 { - return nil - } - - objs, err := o.Generate() - if err != nil { - return err - } - - err = o.WriteObjectsToDisk(objs...) - if err != nil { - return err - } - - return o.controllerManagerPatch() -} - -func (o *Options) controllerManagerPatch() error { - var kustomizeLabelPatch = `apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: controller-manager -spec: - template: - metadata: -{{- with .Labels }} - labels: -{{ toYaml . 
| indent 8 }} -{{- end }} - spec: - containers: - - name: manager - ports: - - containerPort: {{ .Port }} - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: {{ .CertDir }} - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: {{ .SecretName }} -` - - type KustomizeLabelPatch struct { - Labels map[string]string - SecretName string - Port int32 - CertDir string - } - - p := KustomizeLabelPatch{ - Labels: o.service.selectors, - SecretName: o.secret.Name, - Port: o.port, - CertDir: o.certDir, - } - funcMap := template.FuncMap{ - "toYaml": toYAML, - "indent": indent, - } - temp, err := template.New("kustomizeLabelPatch").Funcs(funcMap).Parse(kustomizeLabelPatch) - if err != nil { - return err - } - buf := bytes.NewBuffer(nil) - if err := temp.Execute(buf, p); err != nil { - return err - } - return afero.WriteFile(o.outFs, path.Join(o.PatchOutputDir, "manager_patch.yaml"), buf.Bytes(), 0644) -} - -func toYAML(m map[string]string) (string, error) { - d, err := yaml.Marshal(m) - return string(d), err -} - -func indent(n int, s string) (string, error) { - buf := bytes.NewBuffer(nil) - for _, elem := range strings.Split(s, "\n") { - for i := 0; i < n; i++ { - _, err := buf.WriteRune(' ') - if err != nil { - return "", err - } - } - _, err := buf.WriteString(elem) - if err != nil { - return "", err - } - _, err = buf.WriteRune('\n') - if err != nil { - return "", err - } - } - return strings.TrimRight(buf.String(), " \n"), nil -} diff --git a/pkg/webhook/manifests_test.go b/pkg/webhook/manifests_test.go deleted file mode 100644 index e2685af97..000000000 --- a/pkg/webhook/manifests_test.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package webhook - -import ( - "go/token" - "reflect" - "testing" - - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - "sigs.k8s.io/controller-tools/pkg/internal/general" -) - -func TestManifestGenerator(t *testing.T) { - failFP := admissionregistrationv1beta1.Fail - ignoreFP := admissionregistrationv1beta1.Ignore - tests := []struct { - content string - exp map[string]webhook - }{ - { - content: `package foo - import ( - "fmt" - "time" - ) - - // comment only - - // +kubebuilder:webhook:groups=apps,resources=deployments,verbs=CREATE;UPDATE - // +kubebuilder:webhook:name=bar-webhook,path=/bar,type=mutating,failure-policy=Fail - // bar function - func bar() { - fmt.Println(time.Now()) - } - - // +kubebuilder:webhook:groups=crew,versions=v1,resources=firstmates,verbs=delete - // +kubebuilder:webhook:name=baz-webhook,path=/baz,type=validating,failure-policy=ignore - // baz function - func baz() { - fmt.Println(time.Now()) - }`, - exp: map[string]webhook{ - "/bar": &admissionWebhook{ - name: "bar-webhook", - typ: mutatingWebhook, - path: "/bar", - rules: []admissionregistrationv1beta1.RuleWithOperations{ - { - Rule: admissionregistrationv1beta1.Rule{ - APIGroups: []string{"apps"}, - Resources: []string{"deployments"}, - }, - Operations: []admissionregistrationv1beta1.OperationType{ - admissionregistrationv1beta1.Create, - admissionregistrationv1beta1.Update, - }, - }, - }, - failurePolicy: &failFP, - }, - "/baz": &admissionWebhook{ - name: "baz-webhook", - typ: validatingWebhook, - path: "/baz", - rules: []admissionregistrationv1beta1.RuleWithOperations{ - { - Rule: admissionregistrationv1beta1.Rule{ - APIGroups: []string{"crew"}, - APIVersions: []string{"v1"}, - Resources: []string{"firstmates"}, - }, - Operations: []admissionregistrationv1beta1.OperationType{ - admissionregistrationv1beta1.Delete, - }, - }, - }, - failurePolicy: &ignoreFP, - }, - }, - }, - } - - for _, test := range tests { - o := &Options{ - WriterOptions: WriterOptions{ - InputDir: "test.go", - }, - } - fset := token.NewFileSet() - err := general.ParseFile(fset, "test.go", test.content, o.parseAnnotation) - if err != nil { - t.Errorf("processFile should have succeeded, but got error: %v", err) - } - if !reflect.DeepEqual(test.exp, o.webhooks) { - t.Errorf("webhooks should have matched, expected %#v and got %#v", test.exp, o.webhooks) - } - } -} diff --git a/pkg/webhook/parser.go b/pkg/webhook/parser.go index 9ac73cef0..d4ba805a7 100644 --- a/pkg/webhook/parser.go +++ b/pkg/webhook/parser.go @@ -14,223 +14,155 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package webhook contains libraries for generating webhookconfig manifests +// from markers in Go source files. 
+// +// The markers take the form: +// +// +kubebuilder:webhook:failurePolicy=<string>,groups=<[]string>,resources=<[]string>,verbs=<[]string>,versions=<[]string>,name=<string>,path=<string>,mutating=<bool> package webhook import ( - "errors" - "fmt" - "log" - "strconv" "strings" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "sigs.k8s.io/controller-tools/pkg/internal/general" -) + admissionreg "k8s.io/api/admissionregistration/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -const webhookAnnotationPrefix = "kubebuilder:webhook" + "sigs.k8s.io/controller-tools/pkg/genall" + "sigs.k8s.io/controller-tools/pkg/markers" +) var ( - webhookTags = sets.NewString([]string{"groups", "versions", "resources", "verbs", "type", "name", "path", "failure-policy"}...) - serverTags = sets.NewString([]string{"port", "cert-dir", "service", "selector", "secret", "host", "mutating-webhook-config-name", "validating-webhook-config-name"}...) + // ConfigDefinition is a marker for defining Webhook manifests. + // Call ToWebhook on the value to get a Kubernetes Webhook. + ConfigDefinition = markers.Must(markers.MakeDefinition("kubebuilder:webhook", markers.DescribesPackage, Config{})) ) -// parseAnnotation parses webhook annotations -func (o *Options) parseAnnotation(commentText string) error { - webhookKVMap, serverKVMap := map[string]string{}, map[string]string{} - for _, comment := range strings.Split(commentText, "\n") { - comment := strings.TrimSpace(comment) - anno := general.GetAnnotation(comment, webhookAnnotationPrefix) - if len(anno) == 0 { - continue - } - for _, elem := range strings.Split(anno, ",") { - key, value, err := general.ParseKV(elem) - if err != nil { - log.Fatalf("// +kubebuilder:webhook: tags must be key value pairs. Example "+ - "keys [groups=<group1;group2>,resources=<resource1;resource2>,verbs=<verb1;verb2>] "+ - "Got string: [%s]", anno) - } - switch { - case webhookTags.Has(key): - webhookKVMap[key] = value - case serverTags.Has(key): - serverKVMap[key] = value - } - } - } +// Config is a marker value that describes a kubernetes Webhook config. +type Config struct { + Mutating bool + FailurePolicy string + + Groups []string + Resources []string + Verbs []string + Versions []string + + Name string + Path string +} - if err := o.parseWebhookAnnotation(webhookKVMap); err != nil { - return err +// verbToAPIVariant converts a marker's verb to the proper value for the API. +// Unrecognized verbs are passed through. +func verbToAPIVariant(verbRaw string) admissionreg.OperationType { + switch strings.ToLower(verbRaw) { + case strings.ToLower(string(admissionreg.Create)): + return admissionreg.Create + case strings.ToLower(string(admissionreg.Update)): + return admissionreg.Update + case strings.ToLower(string(admissionreg.Delete)): + return admissionreg.Delete + case strings.ToLower(string(admissionreg.Connect)): + return admissionreg.Connect + case strings.ToLower(string(admissionreg.OperationAll)): + return admissionreg.OperationAll + default: + return admissionreg.OperationType(verbRaw) } - return o.parseServerAnnotation(serverKVMap) } -// parseWebhookAnnotation parses webhook annotations in the same comment group -// nolint: gocyclo -func (o *Options) parseWebhookAnnotation(kvMap map[string]string) error { - if len(kvMap) == 0 { - return nil +// ToWebhook converts this Config to its Kubernetes API form.
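+// +// For example (an illustrative sketch only; the group, resource, name, and path values here are hypothetical, not taken from any real project), a marker such as +// +// +kubebuilder:webhook:groups=apps,versions=v1,resources=deployments,verbs=create;update,name=mdeployment.example.com,path=/mutate-deployment,mutating=true,failurePolicy=Fail +// +// becomes a Webhook named mdeployment.example.com whose single RuleWithOperations matches CREATE and UPDATE of apps/v1 deployments, with failure policy Fail and a client config pointing at the /mutate-deployment service path.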
+func (c Config) ToWebhook() admissionreg.Webhook { + whConfig := admissionreg.RuleWithOperations{ + Rule: admissionreg.Rule{ + APIGroups: c.Groups, + APIVersions: c.Versions, + Resources: c.Resources, + }, + Operations: make([]admissionreg.OperationType, len(c.Verbs)), } - rule := admissionregistrationv1beta1.RuleWithOperations{} - w := &admissionWebhook{} - for key, value := range kvMap { - switch key { - case "groups": - values := strings.Split(value, ";") - normalized := []string{} - for _, v := range values { - if v == "core" { - normalized = append(normalized, "") - } else { - normalized = append(normalized, v) - } - } - rule.APIGroups = values - - case "versions": - values := strings.Split(value, ";") - rule.APIVersions = values - - case "resources": - values := strings.Split(value, ";") - rule.Resources = values - - case "verbs": - values := strings.Split(value, ";") - var ops []admissionregistrationv1beta1.OperationType - for _, v := range values { - switch strings.ToLower(v) { - case strings.ToLower(string(admissionregistrationv1beta1.Create)): - ops = append(ops, admissionregistrationv1beta1.Create) - case strings.ToLower(string(admissionregistrationv1beta1.Update)): - ops = append(ops, admissionregistrationv1beta1.Update) - case strings.ToLower(string(admissionregistrationv1beta1.Delete)): - ops = append(ops, admissionregistrationv1beta1.Delete) - case strings.ToLower(string(admissionregistrationv1beta1.Connect)): - ops = append(ops, admissionregistrationv1beta1.Connect) - case strings.ToLower(string(admissionregistrationv1beta1.OperationAll)): - ops = append(ops, admissionregistrationv1beta1.OperationAll) - default: - return fmt.Errorf("unknown operation: %v", v) - } - } - rule.Operations = ops - - case "type": - switch strings.ToLower(value) { - case "mutating": - w.typ = mutatingWebhook - case "validating": - w.typ = validatingWebhook - default: - return fmt.Errorf("unknown webhook type: %v", value) - } - case "name": - w.name = value - - case "path": - w.path = value - - case "failure-policy": - switch strings.ToLower(value) { - case strings.ToLower(string(admissionregistrationv1beta1.Ignore)): - fp := admissionregistrationv1beta1.Ignore - w.failurePolicy = &fp - case strings.ToLower(string(admissionregistrationv1beta1.Fail)): - fp := admissionregistrationv1beta1.Fail - w.failurePolicy = &fp - default: - return fmt.Errorf("unknown webhook failure policy: %v", value) - } + for i, verbRaw := range c.Verbs { + whConfig.Operations[i] = verbToAPIVariant(verbRaw) + } + + // fix the group names, since letting people type "core" is nice + for i, group := range whConfig.APIGroups { + if group == "core" { + whConfig.APIGroups[i] = "" } } - w.rules = []admissionregistrationv1beta1.RuleWithOperations{rule} - if o.webhooks == nil { - o.webhooks = map[string]webhook{} + + var failurePolicy admissionreg.FailurePolicyType + switch strings.ToLower(c.FailurePolicy) { + case strings.ToLower(string(admissionreg.Ignore)): + failurePolicy = admissionreg.Ignore + case strings.ToLower(string(admissionreg.Fail)): + failurePolicy = admissionreg.Fail + default: + failurePolicy = admissionreg.FailurePolicyType(c.FailurePolicy) + } + path := c.Path + return admissionreg.Webhook{ + Name: c.Name, + Rules: []admissionreg.RuleWithOperations{whConfig}, + FailurePolicy: &failurePolicy, + ClientConfig: admissionreg.WebhookClientConfig{ + Service: &admissionreg.ServiceReference{ + Path: &path, + }, + }, } - o.webhooks[w.path] = w - return nil } -// parseWebhookAnnotation parses webhook server annotations in 
the same comment group -// nolint: gocyclo -func (o *Options) parseServerAnnotation(kvMap map[string]string) error { - if len(kvMap) == 0 { - return nil - } - for key, value := range kvMap { - switch key { - case "port": - port, err := strconv.Atoi(value) - if err != nil { - return err - } - o.port = int32(port) - case "cert-dir": - o.certDir = value - case "service": - // format: <namespace>:<name> - split := strings.Split(value, ":") - if len(split) != 2 || len(split[0]) == 0 || len(split[1]) == 0 { - return fmt.Errorf("invalid service format: expect <namespace>:<name>, but got %q", value) - } - if o.service == nil { - o.service = &service{} - } - o.service.namespace = split[0] - o.service.name = split[1] - case "selector": - // selector of the service. Format: <k1>:<v1>;<k2>:<v2> - split := strings.Split(value, ";") - if len(split) == 0 { - return fmt.Errorf("invalid selector format: expect <k1>:<v1>;<k2>:<v2>, but got %q", value) - } - if o.service == nil { - o.service = &service{} - } - for _, v := range split { - l := strings.Split(v, ":") - if len(l) != 2 || len(l[0]) == 0 || len(l[1]) == 0 { - return fmt.Errorf("invalid selector format: expect <k1>:<v1>;<k2>:<v2>, but got %q", value) - } - if o.service.selectors == nil { - o.service.selectors = map[string]string{} - } - o.service.selectors[l[0]] = l[1] - } - case "host": - if len(value) == 0 { - return errors.New("host should not be empty if specified") - } - o.host = &value +// Generator is a genall.Generator that generates Webhook manifests. +type Generator struct{} - case "mutating-webhook-config-name": - if len(value) == 0 { - return errors.New("mutating-webhook-config-name should not be empty if specified") - } - o.mutatingWebhookConfigName = value +func (Generator) RegisterMarkers(into *markers.Registry) error { + return into.Register(ConfigDefinition) +} +func (Generator) Generate(ctx *genall.GenerationContext) error { + var mutatingCfgs []admissionreg.Webhook + var validatingCfgs []admissionreg.Webhook + for _, root := range ctx.Roots { + markerSet, err := markers.PackageMarkers(ctx.Collector, root) + if err != nil { + root.AddError(err) + } - for _, cfg := range markerSet[ConfigDefinition.Name] { + cfg := cfg.(Config) + if cfg.Mutating { + mutatingCfgs = append(mutatingCfgs, cfg.ToWebhook()) + } else { + validatingCfgs = append(validatingCfgs, cfg.ToWebhook()) } - case "validating-webhook-config-name": - if len(value) == 0 { - return errors.New("validating-webhook-config-name should not be empty if specified") + } + } - o.validatingWebhookConfigName = value + if len(mutatingCfgs) > 0 { + if err := ctx.WriteYAML(admissionreg.MutatingWebhookConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "MutatingWebhookConfiguration", + APIVersion: admissionreg.SchemeGroupVersion.String(), + }, + Webhooks: mutatingCfgs, + }, "mutating-webhook.yaml"); err != nil { + return err } } - case "secret": - // format: <namespace>:<name> - split := strings.Split(value, ":") - if len(split) != 2 || len(split[0]) == 0 || len(split[1]) == 0 { - return fmt.Errorf("invalid secret format: expect <namespace>:<name>, but got %q", value) - } - if o.secret == nil { - o.secret = &types.NamespacedName{} - } - o.secret.Namespace = split[0] - o.secret.Name = split[1] + + if len(validatingCfgs) > 0 { + if err := ctx.WriteYAML(admissionreg.ValidatingWebhookConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "ValidatingWebhookConfiguration", + APIVersion: admissionreg.SchemeGroupVersion.String(), + }, + Webhooks: validatingCfgs, + }, "validating-webhook.yaml"); err != nil { + return err + } + } + return nil } diff --git a/pkg/webhook/parser_test.go b/pkg/webhook/parser_test.go deleted file
mode 100644 index 9deb0fcdd..000000000 --- a/pkg/webhook/parser_test.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "go/token" - "reflect" - "testing" - - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-tools/pkg/internal/general" -) - -func TestParseWebhook(t *testing.T) { - failFP := admissionregistrationv1beta1.Fail - ignoreFP := admissionregistrationv1beta1.Ignore - tests := []struct { - content string - exp map[string]webhook - }{ - { - content: `package foo - import ( - "fmt" - "time" - ) - - // comment only - - // +kubebuilder:webhook:groups=apps,resources=deployments,verbs=CREATE;UPDATE - // +kubebuilder:webhook:name=bar-webhook,path=/bar,type=mutating,failure-policy=Fail - // bar function - func bar() { - fmt.Println(time.Now()) - } - - // +kubebuilder:webhook:groups=crew,versions=v1,resources=firstmates,verbs=delete - // +kubebuilder:webhook:name=baz-webhook,path=/baz,type=validating,failure-policy=ignore - // baz function - func baz() { - fmt.Println(time.Now()) - }`, - exp: map[string]webhook{ - "/bar": &admissionWebhook{ - name: "bar-webhook", - typ: mutatingWebhook, - path: "/bar", - rules: []admissionregistrationv1beta1.RuleWithOperations{ - { - Rule: admissionregistrationv1beta1.Rule{ - APIGroups: []string{"apps"}, - Resources: []string{"deployments"}, - }, - Operations: []admissionregistrationv1beta1.OperationType{ - admissionregistrationv1beta1.Create, - admissionregistrationv1beta1.Update, - }, - }, - }, - failurePolicy: &failFP, - }, - "/baz": &admissionWebhook{ - name: "baz-webhook", - typ: validatingWebhook, - path: "/baz", - rules: []admissionregistrationv1beta1.RuleWithOperations{ - { - Rule: admissionregistrationv1beta1.Rule{ - APIGroups: []string{"crew"}, - APIVersions: []string{"v1"}, - Resources: []string{"firstmates"}, - }, - Operations: []admissionregistrationv1beta1.OperationType{ - admissionregistrationv1beta1.Delete, - }, - }, - }, - failurePolicy: &ignoreFP, - }, - }, - }, - } - - for _, test := range tests { - o := &Options{ - WriterOptions: WriterOptions{ - InputDir: "test.go", - }, - } - fset := token.NewFileSet() - err := general.ParseFile(fset, "test.go", test.content, o.parseAnnotation) - if err != nil { - t.Errorf("processFile should have succeeded, but got error: %v", err) - } - if !reflect.DeepEqual(test.exp, o.webhooks) { - t.Errorf("webhooks should have matched, expected %#v and got %#v", test.exp, o.webhooks) - } - } -} - -func TestParseWebhookServer(t *testing.T) { - tests := []struct { - content string - exp *generatorOptions - }{ - { - content: `package foo - import ( - "fmt" - "time" - ) - - // +kubebuilder:webhook:port=7890,cert-dir=/tmp/test-cert,service=test-system:webhook-service,selector=app:webhook-server - // +kubebuilder:webhook:secret=test-system:webhook-secret - // 
+kubebuilder:webhook:mutating-webhook-config-name=test-mutating-webhook-cfg,validating-webhook-config-name=test-validating-webhook-cfg - // bar function - func bar() { - fmt.Println(time.Now()) - }`, - exp: &generatorOptions{ - port: 7890, - certDir: "/tmp/test-cert", - mutatingWebhookConfigName: "test-mutating-webhook-cfg", - validatingWebhookConfigName: "test-validating-webhook-cfg", - service: &service{ - namespace: "test-system", - name: "webhook-service", - selectors: map[string]string{ - "app": "webhook-server", - }, - }, - secret: &types.NamespacedName{ - Namespace: "test-system", - Name: "webhook-secret", - }, - }, - }, - } - - for _, test := range tests { - o := &Options{ - WriterOptions: WriterOptions{ - InputDir: "test.go", - }, - generatorOptions: generatorOptions{}, - } - fset := token.NewFileSet() - err := general.ParseFile(fset, "test.go", test.content, o.parseAnnotation) - if err != nil { - t.Errorf("processFile should have succeeded, but got error: %v", err) - } - if !reflect.DeepEqual(test.exp, &o.generatorOptions) { - t.Errorf("webhook server should have matched, expected %#v but got %#v", test.exp, o.generatorOptions) - } - } -} diff --git a/pkg/webhook/testdata/input/server.go b/pkg/webhook/testdata/input/server.go deleted file mode 100644 index b3901fb31..000000000 --- a/pkg/webhook/testdata/input/server.go +++ /dev/null @@ -1,16 +0,0 @@ -package foo - -import ( - "fmt" - "time" -) - -// +kubebuilder:webhook:port=7890,cert-dir=/tmp/test-cert -// +kubebuilder:webhook:service=test-system:webhook-service,selector=app:webhook-server -// +kubebuilder:webhook:secret=test-system:webhook-secret -// +kubebuilder:webhook:mutating-webhook-config-name=test-mutating-webhook-cfg,validating-webhook-config-name=test-validating-webhook-cfg -// bar function -// nolint -func foo() { - fmt.Println(time.Now()) -} diff --git a/pkg/webhook/testdata/input/webhooks.go b/pkg/webhook/testdata/input/webhooks.go deleted file mode 100644 index 331ad691f..000000000 --- a/pkg/webhook/testdata/input/webhooks.go +++ /dev/null @@ -1,24 +0,0 @@ -package foo - -import ( - "fmt" - "time" -) - -// comment only - -// +kubebuilder:webhook:groups=apps,resources=deployments,verbs=CREATE;UPDATE -// +kubebuilder:webhook:name=bar-webhook,path=/bar,type=mutating,failure-policy=Fail -// bar function -// nolint -func bar() { - fmt.Println(time.Now()) -} - -// +kubebuilder:webhook:groups=crew,versions=v1,resources=firstmates,verbs=delete -// +kubebuilder:webhook:name=baz-webhook,path=/baz,type=validating,failure-policy=ignore -// baz function -// nolint -func baz() { - fmt.Println(time.Now()) -} diff --git a/pkg/webhook/types.go b/pkg/webhook/types.go deleted file mode 100644 index a7129e921..000000000 --- a/pkg/webhook/types.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package webhook - -// webhookType defines the type of a webhook -type webhookType int - -const ( - _ = iota - // mutatingWebhook represents mutating type webhook - mutatingWebhook webhookType = iota - // validatingWebhook represents validating type webhook - validatingWebhook -) - -// webhook defines the basics that a webhook should support. -type webhook interface { - // GetType returns the Type of the webhook. - // e.g. mutating or validating - GetType() webhookType - // Validate validates if the webhook itself is valid. - // If invalid, a non-nil error will be returned. - Validate() error -} diff --git a/pkg/webhook/writer.go b/pkg/webhook/writer.go deleted file mode 100644 index ecbb45f2a..000000000 --- a/pkg/webhook/writer.go +++ /dev/null @@ -1,92 +0,0 @@ -package webhook - -import ( - "bytes" - "fmt" - "path" - "path/filepath" - - "github.com/ghodss/yaml" - "github.com/spf13/afero" - - "k8s.io/apimachinery/pkg/runtime" -) - -// WriterOptions specifies the input and output. -type WriterOptions struct { - InputDir string - OutputDir string - PatchOutputDir string - - // inFs is filesystem to be used for reading input - inFs afero.Fs - // outFs is filesystem to be used for writing out the result - outFs afero.Fs -} - -// SetDefaults sets up the default options for RBAC Manifest generator. -func (o *WriterOptions) SetDefaults() { - if o.inFs == nil { - o.inFs = afero.NewOsFs() - } - if o.outFs == nil { - o.outFs = afero.NewOsFs() - } - - if len(o.InputDir) == 0 { - o.InputDir = filepath.Join(".", "pkg", "webhook") - } - if len(o.OutputDir) == 0 { - o.OutputDir = filepath.Join(".", "config", "webhook") - } - if len(o.PatchOutputDir) == 0 { - o.PatchOutputDir = filepath.Join(".", "config", "default") - } -} - -// Validate validates the input options. -func (o *WriterOptions) Validate() error { - if _, err := o.inFs.Stat(o.InputDir); err != nil { - return fmt.Errorf("invalid input directory '%s' %v", o.InputDir, err) - } - return nil -} - -// WriteObjectsToDisk writes object to the location specified in WriterOptions. -func (o *WriterOptions) WriteObjectsToDisk(objects ...runtime.Object) error { - exists, err := afero.DirExists(o.outFs, o.OutputDir) - if err != nil { - return err - } - if !exists { - err = o.outFs.MkdirAll(o.OutputDir, 0766) - if err != nil { - return err - } - } - - var buf bytes.Buffer - isFirstObject := true - for _, obj := range objects { - if !isFirstObject { - _, err = buf.WriteString("---\n") - if err != nil { - return err - } - } - marshalled, err := yaml.Marshal(obj) - if err != nil { - return err - } - _, err = buf.Write(marshalled) - if err != nil { - return err - } - isFirstObject = false - } - err = afero.WriteFile(o.outFs, path.Join(o.OutputDir, "webhookmanifests.yaml"), buf.Bytes(), 0644) - if err != nil { - return err - } - return nil -} From f2da9ab6e7f3112d5efc652c346f28020a217900 Mon Sep 17 00:00:00 2001 From: Solly Ross Date: Wed, 1 May 2019 22:47:51 -0700 Subject: [PATCH 11/14] Fix testData import path It had an old import path pointing to the wrong place, which causes all sorts of build failures. 
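As a quick illustration (a minimal sketch; only the two import paths and the module path are taken from this patch, the file itself is hypothetical), code in the test project now imports against the module path declared in testData/go.mod:

    package main

    import (
    	// old, broken path: "sigs.k8s.io/controller-tools/pkg/crd/generator/testData/pkg/apis"
    	_ "sigs.k8s.io/controller-tools/testData/pkg/apis"
    )

    func main() {}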
--- testData/Dockerfile | 6 +++--- testData/Makefile | 2 +- testData/PROJECT | 2 +- testData/cmd/manager/main.go | 4 ++-- testData/go.mod | 2 +- testData/pkg/apis/addtoscheme_fun_v1alpha1.go | 2 +- testData/pkg/apis/fun/v1alpha1/doc.go | 2 +- testData/pkg/apis/fun/v1alpha1/register.go | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/testData/Dockerfile b/testData/Dockerfile index 2267c4135..01e723697 100644 --- a/testData/Dockerfile +++ b/testData/Dockerfile @@ -2,16 +2,16 @@ FROM golang:1.10.3 as builder # Copy in the go src -WORKDIR /go/src/sigs.k8s.io/controller-tools/pkg/crd/generator/testData +WORKDIR /go/src/sigs.k8s.io/controller-tools/testData COPY pkg/ pkg/ COPY cmd/ cmd/ COPY vendor/ vendor/ # Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager sigs.k8s.io/controller-tools/pkg/crd/generator/testData/cmd/manager +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager sigs.k8s.io/controller-tools/testData/cmd/manager # Copy the controller-manager into a thin image FROM ubuntu:latest WORKDIR / -COPY --from=builder /go/src/sigs.k8s.io/controller-tools/pkg/crd/generator/testData/manager . +COPY --from=builder /go/src/sigs.k8s.io/controller-tools/testData/manager . ENTRYPOINT ["/manager"] diff --git a/testData/Makefile b/testData/Makefile index ea8c68ab7..27373067d 100644 --- a/testData/Makefile +++ b/testData/Makefile @@ -10,7 +10,7 @@ test: generate fmt vet manifests # Build manager binary manager: generate fmt vet - go build -o bin/manager sigs.k8s.io/controller-tools/pkg/crd/generator/testData/cmd/manager + go build -o bin/manager sigs.k8s.io/controller-tools/testData/cmd/manager # Run against the configured Kubernetes cluster in ~/.kube/config run: generate fmt vet diff --git a/testData/PROJECT b/testData/PROJECT index 540325ea2..95c1ad16f 100644 --- a/testData/PROJECT +++ b/testData/PROJECT @@ -1,3 +1,3 @@ version: "1" domain: myk8s.io -repo: sigs.k8s.io/controller-tools/pkg/crd/generator/testData +repo: sigs.k8s.io/controller-tools/testData diff --git a/testData/cmd/manager/main.go b/testData/cmd/manager/main.go index 8a7e8a79b..483198069 100644 --- a/testData/cmd/manager/main.go +++ b/testData/cmd/manager/main.go @@ -22,8 +22,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - "sigs.k8s.io/controller-tools/pkg/crd/generator/testData/pkg/apis" - "sigs.k8s.io/controller-tools/pkg/crd/generator/testData/pkg/controller" + "sigs.k8s.io/controller-tools/testData/pkg/apis" + "sigs.k8s.io/controller-tools/testData/pkg/controller" ) func main() { diff --git a/testData/go.mod b/testData/go.mod index 08dcd6d54..6dd0264b1 100644 --- a/testData/go.mod +++ b/testData/go.mod @@ -1,4 +1,4 @@ -module sigs.k8s.io/controller-tools/pkg/crd/generator/testData +module sigs.k8s.io/controller-tools/testData go 1.12 diff --git a/testData/pkg/apis/addtoscheme_fun_v1alpha1.go b/testData/pkg/apis/addtoscheme_fun_v1alpha1.go index 1bbe4b6e0..bafd629aa 100644 --- a/testData/pkg/apis/addtoscheme_fun_v1alpha1.go +++ b/testData/pkg/apis/addtoscheme_fun_v1alpha1.go @@ -16,7 +16,7 @@ limitations under the License. 
package apis import ( - "sigs.k8s.io/controller-tools/pkg/crd/generator/testData/pkg/apis/fun/v1alpha1" + "sigs.k8s.io/controller-tools/testData/pkg/apis/fun/v1alpha1" ) func init() { diff --git a/testData/pkg/apis/fun/v1alpha1/doc.go b/testData/pkg/apis/fun/v1alpha1/doc.go index 664103237..8edbd6f95 100644 --- a/testData/pkg/apis/fun/v1alpha1/doc.go +++ b/testData/pkg/apis/fun/v1alpha1/doc.go @@ -16,7 +16,7 @@ limitations under the License. // Package v1alpha1 contains API Schema definitions for the fun v1alpha1 API group // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=sigs.k8s.io/controller-tools/pkg/crd/generator/testData/pkg/apis/fun +// +k8s:conversion-gen=sigs.k8s.io/controller-tools/testData/pkg/apis/fun // +k8s:defaulter-gen=TypeMeta // +groupName=fun.myk8s.io package v1alpha1 diff --git a/testData/pkg/apis/fun/v1alpha1/register.go b/testData/pkg/apis/fun/v1alpha1/register.go index 3d1ffd0d0..6f94a70c9 100644 --- a/testData/pkg/apis/fun/v1alpha1/register.go +++ b/testData/pkg/apis/fun/v1alpha1/register.go @@ -18,7 +18,7 @@ limitations under the License. // Package v1alpha1 contains API Schema definitions for the fun v1alpha1 API group // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=sigs.k8s.io/controller-tools/pkg/crd/generator/testData/pkg/apis/fun +// +k8s:conversion-gen=sigs.k8s.io/controller-tools/testData/pkg/apis/fun // +k8s:defaulter-gen=TypeMeta // +groupName=fun.myk8s.io package v1alpha1 From 35b0aa25652a0a67205eed86df41be55c9d733fa Mon Sep 17 00:00:00 2001 From: Solly Ross Date: Tue, 30 Apr 2019 22:45:23 -0700 Subject: [PATCH 12/14] Update deps for new parsing Update deps to required versions. --- Gopkg.lock | 178 +- Gopkg.toml | 21 +- go.mod | 18 +- go.sum | 54 +- vendor/github.com/ghodss/yaml/.travis.yml | 7 - vendor/github.com/hpcloud/tail/.gitignore | 3 + vendor/github.com/hpcloud/tail/.travis.yml | 18 + vendor/github.com/hpcloud/tail/CHANGES.md | 63 + vendor/github.com/hpcloud/tail/Dockerfile | 19 + vendor/github.com/hpcloud/tail/LICENSE.txt | 21 + vendor/github.com/hpcloud/tail/Makefile | 11 + vendor/github.com/hpcloud/tail/README.md | 28 + vendor/github.com/hpcloud/tail/appveyor.yml | 11 + .../hpcloud/tail/ratelimiter/Licence | 7 + .../hpcloud/tail/ratelimiter/leakybucket.go | 97 + .../hpcloud/tail/ratelimiter/memory.go | 58 + .../hpcloud/tail/ratelimiter/storage.go | 6 + vendor/github.com/hpcloud/tail/tail.go | 438 + vendor/github.com/hpcloud/tail/tail_posix.go | 11 + .../github.com/hpcloud/tail/tail_windows.go | 12 + vendor/github.com/hpcloud/tail/util/util.go | 48 + .../hpcloud/tail/watch/filechanges.go | 36 + .../github.com/hpcloud/tail/watch/inotify.go | 128 + .../hpcloud/tail/watch/inotify_tracker.go | 260 + .../github.com/hpcloud/tail/watch/polling.go | 118 + vendor/github.com/hpcloud/tail/watch/watch.go | 20 + .../hpcloud/tail/winfile/winfile.go | 92 + vendor/github.com/onsi/ginkgo/.gitignore | 7 + vendor/github.com/onsi/ginkgo/.travis.yml | 15 + vendor/github.com/onsi/ginkgo/CHANGELOG.md | 218 + vendor/github.com/onsi/ginkgo/CONTRIBUTING.md | 33 + vendor/github.com/onsi/ginkgo/LICENSE | 20 + vendor/github.com/onsi/ginkgo/README.md | 121 + vendor/github.com/onsi/ginkgo/RELEASING.md | 14 + .../github.com/onsi/ginkgo/config/config.go | 200 + vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 619 + .../internal/codelocation/code_location.go | 32 + .../internal/containernode/container_node.go | 151 + .../onsi/ginkgo/internal/failer/failer.go | 92 + 
.../ginkgo/internal/leafnodes/benchmarker.go | 103 + .../ginkgo/internal/leafnodes/interfaces.go | 19 + .../onsi/ginkgo/internal/leafnodes/it_node.go | 47 + .../ginkgo/internal/leafnodes/measure_node.go | 62 + .../onsi/ginkgo/internal/leafnodes/runner.go | 117 + .../ginkgo/internal/leafnodes/setup_nodes.go | 48 + .../ginkgo/internal/leafnodes/suite_nodes.go | 55 + .../synchronized_after_suite_node.go | 90 + .../synchronized_before_suite_node.go | 181 + .../onsi/ginkgo/internal/remote/aggregator.go | 249 + .../internal/remote/forwarding_reporter.go | 147 + .../internal/remote/output_interceptor.go | 13 + .../remote/output_interceptor_unix.go | 83 + .../internal/remote/output_interceptor_win.go | 36 + .../onsi/ginkgo/internal/remote/server.go | 224 + .../remote/syscall_dup_linux_arm64.go | 11 + .../internal/remote/syscall_dup_solaris.go | 9 + .../internal/remote/syscall_dup_unix.go | 11 + .../onsi/ginkgo/internal/spec/spec.go | 247 + .../onsi/ginkgo/internal/spec/specs.go | 144 + .../internal/spec_iterator/index_computer.go | 55 + .../spec_iterator/parallel_spec_iterator.go | 59 + .../spec_iterator/serial_spec_iterator.go | 45 + .../sharded_parallel_spec_iterator.go | 47 + .../internal/spec_iterator/spec_iterator.go | 20 + .../ginkgo/internal/specrunner/random_id.go | 15 + .../ginkgo/internal/specrunner/spec_runner.go | 411 + .../onsi/ginkgo/internal/suite/suite.go | 190 + .../internal/testingtproxy/testing_t_proxy.go | 76 + .../ginkgo/internal/writer/fake_writer.go | 36 + .../onsi/ginkgo/internal/writer/writer.go | 89 + .../onsi/ginkgo/reporters/default_reporter.go | 84 + .../onsi/ginkgo/reporters/fake_reporter.go | 59 + .../onsi/ginkgo/reporters/junit_reporter.go | 152 + .../onsi/ginkgo/reporters/reporter.go | 15 + .../reporters/stenographer/console_logging.go | 64 + .../stenographer/fake_stenographer.go | 142 + .../reporters/stenographer/stenographer.go | 572 + .../stenographer/support/go-colorable/LICENSE | 21 + .../support/go-colorable/README.md | 43 + .../support/go-colorable/colorable_others.go | 24 + .../support/go-colorable/colorable_windows.go | 783 + .../support/go-colorable/noncolorable.go | 57 + .../stenographer/support/go-isatty/LICENSE | 9 + .../stenographer/support/go-isatty/README.md | 37 + .../stenographer/support/go-isatty/doc.go | 2 + .../support/go-isatty/isatty_appengine.go | 9 + .../support/go-isatty/isatty_bsd.go | 18 + .../support/go-isatty/isatty_linux.go | 18 + .../support/go-isatty/isatty_solaris.go | 16 + .../support/go-isatty/isatty_windows.go | 19 + .../ginkgo/reporters/teamcity_reporter.go | 93 + .../onsi/ginkgo/types/code_location.go | 15 + .../onsi/ginkgo/types/synchronization.go | 30 + vendor/github.com/onsi/ginkgo/types/types.go | 174 + vendor/github.com/onsi/gomega/.gitignore | 5 + vendor/github.com/onsi/gomega/.travis.yml | 17 + vendor/github.com/onsi/gomega/CHANGELOG.md | 136 + vendor/github.com/onsi/gomega/CONTRIBUTING.md | 14 + vendor/github.com/onsi/gomega/LICENSE | 20 + vendor/github.com/onsi/gomega/Makefile | 6 + vendor/github.com/onsi/gomega/README.md | 21 + vendor/github.com/onsi/gomega/RELEASING.md | 12 + .../github.com/onsi/gomega/format/format.go | 382 + vendor/github.com/onsi/gomega/go.mod | 15 + vendor/github.com/onsi/gomega/go.sum | 24 + vendor/github.com/onsi/gomega/gomega_dsl.go | 429 + .../gomega/internal/assertion/assertion.go | 105 + .../asyncassertion/async_assertion.go | 194 + .../internal/oraclematcher/oracle_matcher.go | 25 + .../testingtsupport/testing_t_support.go | 60 + vendor/github.com/onsi/gomega/matchers.go | 427 + 
vendor/github.com/onsi/gomega/matchers/and.go | 63 + .../matchers/assignable_to_type_of_matcher.go | 35 + .../onsi/gomega/matchers/attributes_slice.go | 14 + .../onsi/gomega/matchers/be_a_directory.go | 54 + .../onsi/gomega/matchers/be_a_regular_file.go | 54 + .../gomega/matchers/be_an_existing_file.go | 38 + .../onsi/gomega/matchers/be_closed_matcher.go | 46 + .../onsi/gomega/matchers/be_empty_matcher.go | 27 + .../matchers/be_equivalent_to_matcher.go | 34 + .../onsi/gomega/matchers/be_false_matcher.go | 26 + .../onsi/gomega/matchers/be_identical_to.go | 37 + .../onsi/gomega/matchers/be_nil_matcher.go | 18 + .../gomega/matchers/be_numerically_matcher.go | 132 + .../onsi/gomega/matchers/be_sent_matcher.go | 71 + .../gomega/matchers/be_temporally_matcher.go | 66 + .../onsi/gomega/matchers/be_true_matcher.go | 26 + .../onsi/gomega/matchers/be_zero_matcher.go | 28 + .../onsi/gomega/matchers/consist_of.go | 80 + .../matchers/contain_element_matcher.go | 56 + .../matchers/contain_substring_matcher.go | 38 + .../onsi/gomega/matchers/equal_matcher.go | 42 + .../onsi/gomega/matchers/have_cap_matcher.go | 28 + .../onsi/gomega/matchers/have_key_matcher.go | 54 + .../matchers/have_key_with_value_matcher.go | 74 + .../onsi/gomega/matchers/have_len_matcher.go | 28 + .../gomega/matchers/have_occurred_matcher.go | 33 + .../gomega/matchers/have_prefix_matcher.go | 36 + .../gomega/matchers/have_suffix_matcher.go | 36 + .../gomega/matchers/match_error_matcher.go | 51 + .../gomega/matchers/match_json_matcher.go | 65 + .../gomega/matchers/match_regexp_matcher.go | 43 + .../onsi/gomega/matchers/match_xml_matcher.go | 134 + .../gomega/matchers/match_yaml_matcher.go | 76 + vendor/github.com/onsi/gomega/matchers/not.go | 30 + vendor/github.com/onsi/gomega/matchers/or.go | 67 + .../onsi/gomega/matchers/panic_matcher.go | 46 + .../onsi/gomega/matchers/receive_matcher.go | 128 + .../matchers/semi_structured_data_support.go | 92 + .../onsi/gomega/matchers/succeed_matcher.go | 33 + .../goraph/bipartitegraph/bipartitegraph.go | 41 + .../bipartitegraph/bipartitegraphmatching.go | 159 + .../matchers/support/goraph/edge/edge.go | 61 + .../matchers/support/goraph/node/node.go | 7 + .../matchers/support/goraph/util/util.go | 7 + .../onsi/gomega/matchers/type_support.go | 179 + .../onsi/gomega/matchers/with_transform.go | 72 + vendor/github.com/onsi/gomega/types/types.go | 26 + vendor/github.com/pkg/errors/.gitignore | 24 - vendor/github.com/pkg/errors/.travis.yml | 15 - vendor/github.com/pkg/errors/LICENSE | 23 - vendor/github.com/pkg/errors/README.md | 52 - vendor/github.com/pkg/errors/appveyor.yml | 32 - vendor/github.com/pkg/errors/errors.go | 282 - vendor/github.com/pkg/errors/stack.go | 147 - vendor/github.com/spf13/afero/.travis.yml | 21 - vendor/github.com/spf13/afero/LICENSE.txt | 174 - vendor/github.com/spf13/afero/README.md | 452 - vendor/github.com/spf13/afero/afero.go | 108 - vendor/github.com/spf13/afero/appveyor.yml | 15 - vendor/github.com/spf13/afero/basepath.go | 180 - .../github.com/spf13/afero/cacheOnReadFs.go | 290 - vendor/github.com/spf13/afero/const_bsds.go | 22 - .../github.com/spf13/afero/const_win_unix.go | 25 - .../github.com/spf13/afero/copyOnWriteFs.go | 293 - vendor/github.com/spf13/afero/go.mod | 3 - vendor/github.com/spf13/afero/go.sum | 2 - vendor/github.com/spf13/afero/httpFs.go | 110 - vendor/github.com/spf13/afero/ioutil.go | 230 - vendor/github.com/spf13/afero/lstater.go | 27 - vendor/github.com/spf13/afero/match.go | 110 - vendor/github.com/spf13/afero/mem/dir.go | 37 - 
 vendor/github.com/spf13/afero/mem/dirmap.go | 43 -
 vendor/github.com/spf13/afero/mem/file.go | 317 -
 vendor/github.com/spf13/afero/memmap.go | 365 -
 vendor/github.com/spf13/afero/os.go | 101 -
 vendor/github.com/spf13/afero/path.go | 106 -
 vendor/github.com/spf13/afero/readonlyfs.go | 80 -
 vendor/github.com/spf13/afero/regexpfs.go | 214 -
 vendor/github.com/spf13/afero/unionFile.go | 320 -
 vendor/github.com/spf13/afero/util.go | 330 -
 vendor/golang.org/x/net/html/atom/atom.go | 78 +
 vendor/golang.org/x/net/html/atom/gen.go | 712 +
 vendor/golang.org/x/net/html/atom/table.go | 783 +
 .../golang.org/x/net/html/charset/charset.go | 257 +
 vendor/golang.org/x/net/html/const.go | 112 +
 vendor/golang.org/x/net/html/doc.go | 106 +
 vendor/golang.org/x/net/html/doctype.go | 156 +
 vendor/golang.org/x/net/html/entity.go | 2253 +
 vendor/golang.org/x/net/html/escape.go | 258 +
 vendor/golang.org/x/net/html/foreign.go | 226 +
 vendor/golang.org/x/net/html/node.go | 220 +
 vendor/golang.org/x/net/html/parse.go | 2417 +
 vendor/golang.org/x/net/html/render.go | 271 +
 vendor/golang.org/x/net/html/token.go | 1219 +
 vendor/golang.org/x/net/http2/frame.go | 2 +-
 vendor/golang.org/x/net/http2/server.go | 18 +-
 vendor/golang.org/x/net/http2/transport.go | 6 +-
 .../x/net/idna/{idna.go => idna10.0.0.go} | 8 +-
 vendor/golang.org/x/net/idna/idna9.0.0.go | 682 +
 .../x/net/idna/{tables.go => tables10.0.0.go} | 6 +-
 vendor/golang.org/x/net/idna/tables11.0.0.go | 4653 ++
 vendor/golang.org/x/net/idna/tables9.0.0.go | 4486 ++
 vendor/golang.org/x/sys/AUTHORS | 3 +
 vendor/golang.org/x/sys/CONTRIBUTORS | 3 +
 vendor/golang.org/x/sys/LICENSE | 27 +
 vendor/golang.org/x/sys/PATENTS | 22 +
 vendor/golang.org/x/sys/unix/.gitignore | 2 +
 vendor/golang.org/x/sys/unix/README.md | 173 +
 .../golang.org/x/sys/unix/affinity_linux.go | 124 +
 vendor/golang.org/x/sys/unix/aliases.go | 14 +
 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 17 +
 vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 +
 .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 +
 vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 +
 .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 +
 .../x/sys/unix/asm_dragonfly_amd64.s | 29 +
 .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 +
 .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 +
 .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 +
 .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 +
 vendor/golang.org/x/sys/unix/asm_linux_386.s | 65 +
 .../golang.org/x/sys/unix/asm_linux_amd64.s | 57 +
 vendor/golang.org/x/sys/unix/asm_linux_arm.s | 56 +
 .../golang.org/x/sys/unix/asm_linux_arm64.s | 52 +
 .../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 +
 .../golang.org/x/sys/unix/asm_linux_mipsx.s | 54 +
 .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 44 +
 .../golang.org/x/sys/unix/asm_linux_s390x.s | 56 +
 vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 +
 .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 +
 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 +
 .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 +
 .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 +
 .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 +
 .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 +
 .../golang.org/x/sys/unix/asm_openbsd_arm64.s | 29 +
 .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 +
 .../golang.org/x/sys/unix/bluetooth_linux.go | 35 +
 vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 +
 vendor/golang.org/x/sys/unix/constants.go | 13 +
 vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 +
 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 +
 vendor/golang.org/x/sys/unix/dev_darwin.go | 24 +
 vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 +
 vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 +
 vendor/golang.org/x/sys/unix/dev_linux.go | 42 +
 vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 +
 vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 +
 vendor/golang.org/x/sys/unix/dirent.go | 17 +
 vendor/golang.org/x/sys/unix/endian_big.go | 9 +
 vendor/golang.org/x/sys/unix/endian_little.go | 9 +
 vendor/golang.org/x/sys/unix/env_unix.go | 31 +
 .../x/sys/unix/errors_freebsd_386.go | 227 +
 .../x/sys/unix/errors_freebsd_amd64.go | 227 +
 .../x/sys/unix/errors_freebsd_arm.go | 226 +
 vendor/golang.org/x/sys/unix/fcntl.go | 32 +
 vendor/golang.org/x/sys/unix/fcntl_darwin.go | 18 +
 .../x/sys/unix/fcntl_linux_32bit.go | 13 +
 vendor/golang.org/x/sys/unix/gccgo.go | 62 +
 vendor/golang.org/x/sys/unix/gccgo_c.c | 39 +
 .../x/sys/unix/gccgo_linux_amd64.go | 20 +
 vendor/golang.org/x/sys/unix/ioctl.go | 30 +
 vendor/golang.org/x/sys/unix/mkall.sh | 221 +
 vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 +
 vendor/golang.org/x/sys/unix/mkerrors.sh | 661 +
 vendor/golang.org/x/sys/unix/mkpost.go | 106 +
 vendor/golang.org/x/sys/unix/mksyscall.go | 407 +
 .../x/sys/unix/mksyscall_aix_ppc.go | 415 +
 .../x/sys/unix/mksyscall_aix_ppc64.go | 614 +
 .../x/sys/unix/mksyscall_solaris.go | 335 +
 .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 +
 vendor/golang.org/x/sys/unix/mksysnum.go | 190 +
 vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 +
 .../golang.org/x/sys/unix/pledge_openbsd.go | 163 +
 vendor/golang.org/x/sys/unix/race.go | 30 +
 vendor/golang.org/x/sys/unix/race0.go | 25 +
 .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 +
 vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 120 +
 vendor/golang.org/x/sys/unix/str.go | 26 +
 vendor/golang.org/x/sys/unix/syscall.go | 54 +
 vendor/golang.org/x/sys/unix/syscall_aix.go | 557 +
 .../golang.org/x/sys/unix/syscall_aix_ppc.go | 34 +
 .../x/sys/unix/syscall_aix_ppc64.go | 34 +
 vendor/golang.org/x/sys/unix/syscall_bsd.go | 624 +
 .../golang.org/x/sys/unix/syscall_darwin.go | 706 +
 .../x/sys/unix/syscall_darwin_386.go | 63 +
 .../x/sys/unix/syscall_darwin_amd64.go | 63 +
 .../x/sys/unix/syscall_darwin_arm.go | 64 +
 .../x/sys/unix/syscall_darwin_arm64.go | 66 +
 .../x/sys/unix/syscall_darwin_libSystem.go | 31 +
 .../x/sys/unix/syscall_dragonfly.go | 539 +
 .../x/sys/unix/syscall_dragonfly_amd64.go | 52 +
 .../golang.org/x/sys/unix/syscall_freebsd.go | 824 +
 .../x/sys/unix/syscall_freebsd_386.go | 52 +
 .../x/sys/unix/syscall_freebsd_amd64.go | 52 +
 .../x/sys/unix/syscall_freebsd_arm.go | 52 +
 .../x/sys/unix/syscall_freebsd_arm64.go | 52 +
 vendor/golang.org/x/sys/unix/syscall_linux.go | 1847 +
 .../x/sys/unix/syscall_linux_386.go | 386 +
 .../x/sys/unix/syscall_linux_amd64.go | 190 +
 .../x/sys/unix/syscall_linux_amd64_gc.go | 13 +
 .../x/sys/unix/syscall_linux_arm.go | 274 +
 .../x/sys/unix/syscall_linux_arm64.go | 223 +
 .../golang.org/x/sys/unix/syscall_linux_gc.go | 14 +
 .../x/sys/unix/syscall_linux_gc_386.go | 16 +
 .../x/sys/unix/syscall_linux_gccgo_386.go | 30 +
 .../x/sys/unix/syscall_linux_gccgo_arm.go | 20 +
 .../x/sys/unix/syscall_linux_mips64x.go | 222 +
 .../x/sys/unix/syscall_linux_mipsx.go | 234 +
 .../x/sys/unix/syscall_linux_ppc64x.go | 152 +
 .../x/sys/unix/syscall_linux_riscv64.go | 226 +
 .../x/sys/unix/syscall_linux_s390x.go | 338 +
 .../x/sys/unix/syscall_linux_sparc64.go | 147 +
 .../golang.org/x/sys/unix/syscall_netbsd.go | 622 +
 .../x/sys/unix/syscall_netbsd_386.go | 33 +
 .../x/sys/unix/syscall_netbsd_amd64.go | 33 +
 .../x/sys/unix/syscall_netbsd_arm.go | 33 +
 .../x/sys/unix/syscall_netbsd_arm64.go | 33 +
 .../golang.org/x/sys/unix/syscall_openbsd.go | 416 +
 .../x/sys/unix/syscall_openbsd_386.go | 37 +
 .../x/sys/unix/syscall_openbsd_amd64.go | 37 +
 .../x/sys/unix/syscall_openbsd_arm.go | 37 +
 .../x/sys/unix/syscall_openbsd_arm64.go | 37 +
 .../golang.org/x/sys/unix/syscall_solaris.go | 737 +
 .../x/sys/unix/syscall_solaris_amd64.go | 23 +
 vendor/golang.org/x/sys/unix/syscall_unix.go | 431 +
 .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 +
 .../x/sys/unix/syscall_unix_gc_ppc64x.go | 24 +
 vendor/golang.org/x/sys/unix/timestruct.go | 82 +
 vendor/golang.org/x/sys/unix/types_aix.go | 236 +
 vendor/golang.org/x/sys/unix/types_darwin.go | 283 +
 .../golang.org/x/sys/unix/types_dragonfly.go | 263 +
 vendor/golang.org/x/sys/unix/types_freebsd.go | 356 +
 vendor/golang.org/x/sys/unix/types_netbsd.go | 289 +
 vendor/golang.org/x/sys/unix/types_openbsd.go | 282 +
 vendor/golang.org/x/sys/unix/types_solaris.go | 266 +
 .../golang.org/x/sys/unix/unveil_openbsd.go | 42 +
 vendor/golang.org/x/sys/unix/xattr_bsd.go | 240 +
 .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1374 +
 .../x/sys/unix/zerrors_aix_ppc64.go | 1375 +
 .../x/sys/unix/zerrors_darwin_386.go | 1783 +
 .../x/sys/unix/zerrors_darwin_amd64.go | 1783 +
 .../x/sys/unix/zerrors_darwin_arm.go | 1783 +
 .../x/sys/unix/zerrors_darwin_arm64.go | 1783 +
 .../x/sys/unix/zerrors_dragonfly_amd64.go | 1650 +
 .../x/sys/unix/zerrors_freebsd_386.go | 1793 +
 .../x/sys/unix/zerrors_freebsd_amd64.go | 1794 +
 .../x/sys/unix/zerrors_freebsd_arm.go | 1802 +
 .../x/sys/unix/zerrors_freebsd_arm64.go | 1794 +
 .../x/sys/unix/zerrors_linux_386.go | 2839 +
 .../x/sys/unix/zerrors_linux_amd64.go | 2839 +
 .../x/sys/unix/zerrors_linux_arm.go | 2845 +
 .../x/sys/unix/zerrors_linux_arm64.go | 2830 +
 .../x/sys/unix/zerrors_linux_mips.go | 2846 +
 .../x/sys/unix/zerrors_linux_mips64.go | 2846 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 2846 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 2846 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 2901 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 2901 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 2826 +
 .../x/sys/unix/zerrors_linux_s390x.go | 2899 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 2895 +
 .../x/sys/unix/zerrors_netbsd_386.go | 1772 +
 .../x/sys/unix/zerrors_netbsd_amd64.go | 1762 +
 .../x/sys/unix/zerrors_netbsd_arm.go | 1751 +
 .../x/sys/unix/zerrors_netbsd_arm64.go | 1762 +
 .../x/sys/unix/zerrors_openbsd_386.go | 1654 +
 .../x/sys/unix/zerrors_openbsd_amd64.go | 1765 +
 .../x/sys/unix/zerrors_openbsd_arm.go | 1656 +
 .../x/sys/unix/zerrors_openbsd_arm64.go | 1789 +
 .../x/sys/unix/zerrors_solaris_amd64.go | 1532 +
 .../golang.org/x/sys/unix/zptrace386_linux.go | 80 +
 .../golang.org/x/sys/unix/zptracearm_linux.go | 41 +
 .../x/sys/unix/zptracemips_linux.go | 50 +
 .../x/sys/unix/zptracemipsle_linux.go | 50 +
 .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1484 +
 .../x/sys/unix/zsyscall_aix_ppc64.go | 1442 +
 .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 +
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 +
 .../x/sys/unix/zsyscall_darwin_386.1_11.go | 1810 +
 .../x/sys/unix/zsyscall_darwin_386.go | 2505 +
 .../x/sys/unix/zsyscall_darwin_386.s | 284 +
 .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1810 +
 .../x/sys/unix/zsyscall_darwin_amd64.go | 2520 +
 .../x/sys/unix/zsyscall_darwin_amd64.s | 286 +
 .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1793 +
 .../x/sys/unix/zsyscall_darwin_arm.go | 2483 +
 .../x/sys/unix/zsyscall_darwin_arm.s | 282 +
 .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 1793 +
 .../x/sys/unix/zsyscall_darwin_arm64.go | 2483 +
 .../x/sys/unix/zsyscall_darwin_arm64.s | 282 +
 .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1659 +
 .../x/sys/unix/zsyscall_freebsd_386.go | 2015 +
 .../x/sys/unix/zsyscall_freebsd_amd64.go | 2015 +
 .../x/sys/unix/zsyscall_freebsd_arm.go | 2015 +
 .../x/sys/unix/zsyscall_freebsd_arm64.go | 2015 +
 .../x/sys/unix/zsyscall_linux_386.go | 2246 +
 .../x/sys/unix/zsyscall_linux_amd64.go | 2413 +
 .../x/sys/unix/zsyscall_linux_arm.go | 2368 +
 .../x/sys/unix/zsyscall_linux_arm64.go | 2270 +
 .../x/sys/unix/zsyscall_linux_mips.go | 2426 +
 .../x/sys/unix/zsyscall_linux_mips64.go | 2397 +
 .../x/sys/unix/zsyscall_linux_mips64le.go | 2397 +
 .../x/sys/unix/zsyscall_linux_mipsle.go | 2426 +
 .../x/sys/unix/zsyscall_linux_ppc64.go | 2475 +
 .../x/sys/unix/zsyscall_linux_ppc64le.go | 2475 +
 .../x/sys/unix/zsyscall_linux_riscv64.go | 2250 +
 .../x/sys/unix/zsyscall_linux_s390x.go | 2245 +
 .../x/sys/unix/zsyscall_linux_sparc64.go | 2408 +
 .../x/sys/unix/zsyscall_netbsd_386.go | 1826 +
 .../x/sys/unix/zsyscall_netbsd_amd64.go | 1826 +
 .../x/sys/unix/zsyscall_netbsd_arm.go | 1826 +
 .../x/sys/unix/zsyscall_netbsd_arm64.go | 1826 +
 .../x/sys/unix/zsyscall_openbsd_386.go | 1692 +
 .../x/sys/unix/zsyscall_openbsd_amd64.go | 1692 +
 .../x/sys/unix/zsyscall_openbsd_arm.go | 1692 +
 .../x/sys/unix/zsyscall_openbsd_arm64.go | 1692 +
 .../x/sys/unix/zsyscall_solaris_amd64.go | 1953 +
 .../x/sys/unix/zsysctl_openbsd_386.go | 272 +
 .../x/sys/unix/zsysctl_openbsd_amd64.go | 270 +
 .../x/sys/unix/zsysctl_openbsd_arm.go | 272 +
 .../x/sys/unix/zsysctl_openbsd_arm64.go | 275 +
 .../x/sys/unix/zsysnum_darwin_386.go | 436 +
 .../x/sys/unix/zsysnum_darwin_amd64.go | 438 +
 .../x/sys/unix/zsysnum_darwin_arm.go | 436 +
 .../x/sys/unix/zsysnum_darwin_arm64.go | 436 +
 .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 +
 .../x/sys/unix/zsysnum_freebsd_386.go | 403 +
 .../x/sys/unix/zsysnum_freebsd_amd64.go | 403 +
 .../x/sys/unix/zsysnum_freebsd_arm.go | 403 +
 .../x/sys/unix/zsysnum_freebsd_arm64.go | 395 +
 .../x/sys/unix/zsysnum_linux_386.go | 392 +
 .../x/sys/unix/zsysnum_linux_amd64.go | 344 +
 .../x/sys/unix/zsysnum_linux_arm.go | 364 +
 .../x/sys/unix/zsysnum_linux_arm64.go | 289 +
 .../x/sys/unix/zsysnum_linux_mips.go | 377 +
 .../x/sys/unix/zsysnum_linux_mips64.go | 337 +
 .../x/sys/unix/zsysnum_linux_mips64le.go | 337 +
 .../x/sys/unix/zsysnum_linux_mipsle.go | 377 +
 .../x/sys/unix/zsysnum_linux_ppc64.go | 375 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 375 +
 .../x/sys/unix/zsysnum_linux_riscv64.go | 288 +
 .../x/sys/unix/zsysnum_linux_s390x.go | 337 +
 .../x/sys/unix/zsysnum_linux_sparc64.go | 351 +
 .../x/sys/unix/zsysnum_netbsd_386.go | 274 +
 .../x/sys/unix/zsysnum_netbsd_amd64.go | 274 +
 .../x/sys/unix/zsysnum_netbsd_arm.go | 274 +
 .../x/sys/unix/zsysnum_netbsd_arm64.go | 274 +
 .../x/sys/unix/zsysnum_openbsd_386.go | 218 +
 .../x/sys/unix/zsysnum_openbsd_amd64.go | 218 +
 .../x/sys/unix/zsysnum_openbsd_arm.go | 218 +
 .../x/sys/unix/zsysnum_openbsd_arm64.go | 217 +
 .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 345 +
 .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 350 +
 .../x/sys/unix/ztypes_darwin_386.go | 499 +
 .../x/sys/unix/ztypes_darwin_amd64.go | 509 +
 .../x/sys/unix/ztypes_darwin_arm.go | 500 +
 .../x/sys/unix/ztypes_darwin_arm64.go | 509 +
 .../x/sys/unix/ztypes_dragonfly_amd64.go | 469 +
 .../x/sys/unix/ztypes_freebsd_386.go | 603 +
 .../x/sys/unix/ztypes_freebsd_amd64.go | 602 +
 .../x/sys/unix/ztypes_freebsd_arm.go | 602 +
 .../x/sys/unix/ztypes_freebsd_arm64.go | 602 +
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 2300 +
 .../x/sys/unix/ztypes_linux_amd64.go | 2313 +
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2291 +
 .../x/sys/unix/ztypes_linux_arm64.go | 2292 +
 .../x/sys/unix/ztypes_linux_mips.go | 2297 +
 .../x/sys/unix/ztypes_linux_mips64.go | 2294 +
 .../x/sys/unix/ztypes_linux_mips64le.go | 2294 +
 .../x/sys/unix/ztypes_linux_mipsle.go | 2297 +
 .../x/sys/unix/ztypes_linux_ppc64.go | 2302 +
 .../x/sys/unix/ztypes_linux_ppc64le.go | 2302 +
 .../x/sys/unix/ztypes_linux_riscv64.go | 2319 +
 .../x/sys/unix/ztypes_linux_s390x.go | 2316 +
 .../x/sys/unix/ztypes_linux_sparc64.go | 2297 +
 .../x/sys/unix/ztypes_netbsd_386.go | 465 +
 .../x/sys/unix/ztypes_netbsd_amd64.go | 472 +
 .../x/sys/unix/ztypes_netbsd_arm.go | 470 +
 .../x/sys/unix/ztypes_netbsd_arm64.go | 472 +
 .../x/sys/unix/ztypes_openbsd_386.go | 570 +
 .../x/sys/unix/ztypes_openbsd_amd64.go | 570 +
 .../x/sys/unix/ztypes_openbsd_arm.go | 571 +
 .../x/sys/unix/ztypes_openbsd_arm64.go | 564 +
 .../x/sys/unix/ztypes_solaris_amd64.go | 442 +
 .../x/text/encoding/charmap/charmap.go | 249 +
 .../x/text/encoding/charmap/maketables.go | 556 +
 .../x/text/encoding/charmap/tables.go | 7410 +++
 vendor/golang.org/x/text/encoding/encoding.go | 335 +
 .../x/text/encoding/htmlindex/gen.go | 173 +
 .../x/text/encoding/htmlindex/htmlindex.go | 86 +
 .../x/text/encoding/htmlindex/map.go | 105 +
 .../x/text/encoding/htmlindex/tables.go | 353 +
 .../text/encoding/internal/identifier/gen.go | 142 +
 .../internal/identifier/identifier.go | 81 +
 .../text/encoding/internal/identifier/mib.go | 1619 +
 .../x/text/encoding/internal/internal.go | 75 +
 .../x/text/encoding/japanese/all.go | 12 +
 .../x/text/encoding/japanese/eucjp.go | 225 +
 .../x/text/encoding/japanese/iso2022jp.go | 299 +
 .../x/text/encoding/japanese/maketables.go | 161 +
 .../x/text/encoding/japanese/shiftjis.go | 189 +
 .../x/text/encoding/japanese/tables.go | 26971 ++++
 .../x/text/encoding/korean/euckr.go | 177 +
 .../x/text/encoding/korean/maketables.go | 143 +
 .../x/text/encoding/korean/tables.go | 34152 ++++
 .../x/text/encoding/simplifiedchinese/all.go | 12 +
 .../x/text/encoding/simplifiedchinese/gbk.go | 269 +
 .../encoding/simplifiedchinese/hzgb2312.go | 245 +
 .../encoding/simplifiedchinese/maketables.go | 161 +
 .../text/encoding/simplifiedchinese/tables.go | 43999 +++++
 .../text/encoding/traditionalchinese/big5.go | 199 +
 .../encoding/traditionalchinese/maketables.go | 140 +
 .../encoding/traditionalchinese/tables.go | 37142 +++++
 .../x/text/encoding/unicode/override.go | 82 +
 .../x/text/encoding/unicode/unicode.go | 434 +
 .../x/text/internal/language/common.go | 16 +
 .../x/text/internal/language/compact.go | 29 +
 .../text/internal/language/compact/compact.go | 61 +
 .../x/text/internal/language/compact/gen.go | 64 +
 .../internal/language/compact/gen_index.go | 113 +
 .../internal/language/compact/gen_parents.go | 54 +
 .../internal/language/compact/language.go | 260 +
 .../text/internal/language/compact/parents.go | 120 +
 .../text/internal/language/compact/tables.go | 1015 +
 .../x/text/internal/language/compact/tags.go | 91 +
 .../x/text/internal/language/compose.go | 167 +
 .../x/text/internal/language/coverage.go | 28 +
 .../x/text/internal/language/gen.go | 1520 +
 .../x/text/internal/language/gen_common.go | 20 +
 .../x/text/internal/language/language.go | 596 +
 .../x/text/internal/language/lookup.go | 412 +
 .../x/text/internal/language/match.go | 226 +
 .../x/text/internal/language/parse.go | 594 +
 .../x/text/internal/language/tables.go | 3431 +
 .../x/text/internal/language/tags.go | 48 +
 vendor/golang.org/x/text/internal/tag/tag.go | 100 +
 .../internal/utf8internal/utf8internal.go | 87 +
 vendor/golang.org/x/text/language/coverage.go | 187 +
 vendor/golang.org/x/text/language/doc.go | 102 +
 vendor/golang.org/x/text/language/gen.go | 305 +
 vendor/golang.org/x/text/language/go1_1.go | 38 +
 vendor/golang.org/x/text/language/go1_2.go | 11 +
 vendor/golang.org/x/text/language/language.go | 601 +
 vendor/golang.org/x/text/language/match.go | 735 +
 vendor/golang.org/x/text/language/parse.go | 228 +
 vendor/golang.org/x/text/language/tables.go | 298 +
 vendor/golang.org/x/text/language/tags.go | 145 +
 vendor/golang.org/x/text/runes/cond.go | 187 +
 vendor/golang.org/x/text/runes/runes.go | 355 +
 .../golang.org/x/text/transform/transform.go | 6 +-
 vendor/golang.org/x/text/unicode/bidi/bidi.go | 2 +-
 .../golang.org/x/text/unicode/bidi/bracket.go | 4 +-
 vendor/golang.org/x/text/unicode/bidi/core.go | 2 +-
 vendor/golang.org/x/text/unicode/bidi/gen.go | 2 +-
 .../x/text/unicode/bidi/gen_ranges.go | 2 +-
 .../x/text/unicode/bidi/tables10.0.0.go | 2 +-
 .../x/text/unicode/bidi/tables11.0.0.go | 1887 +
 .../x/text/unicode/norm/composition.go | 8 +-
 .../x/text/unicode/norm/forminfo.go | 19 +
 vendor/golang.org/x/text/unicode/norm/iter.go | 3 +-
 .../x/text/unicode/norm/maketables.go | 20 +-
 .../x/text/unicode/norm/normalize.go | 4 +-
 .../x/text/unicode/norm/readwriter.go | 4 +-
 .../x/text/unicode/norm/tables10.0.0.go | 1892 +-
 .../x/text/unicode/norm/tables11.0.0.go | 7693 +++
 .../x/text/unicode/norm/tables9.0.0.go | 1890 +-
 .../x/text/unicode/norm/transform.go | 10 +-
 .../x/tools/go/ast/astutil/enclosing.go | 627 -
 .../x/tools/go/ast/astutil/imports.go | 481 -
 .../x/tools/go/ast/astutil/rewrite.go | 477 -
 .../golang.org/x/tools/go/ast/astutil/util.go | 14 -
 vendor/golang.org/x/tools/go/expect/expect.go | 149 +
 .../golang.org/x/tools/go/expect/extract.go | 263 +
 .../golang.org/x/tools/go/internal/cgo/cgo.go | 220 -
 .../x/tools/go/internal/cgo/cgo_pkgconfig.go | 39 -
 .../x/tools/go/packages/external.go | 2 +-
 .../golang.org/x/tools/go/packages/golist.go | 133 +-
 .../x/tools/go/packages/golist_fallback.go | 450 -
 .../go/packages/golist_fallback_testmain.go | 318 -
 .../x/tools/go/packages/golist_overlay.go | 36 +-
 .../x/tools/go/packages/packages.go | 232 +-
 .../tools/go/packages/packagestest/expect.go | 404 +
 .../tools/go/packages/packagestest/export.go | 323 +
 .../tools/go/packages/packagestest/gopath.go | 74 +
 .../tools/go/packages/packagestest/modules.go | 213 +
 .../go/packages/packagestest/modules_111.go | 7 +
 .../go/packages/packagestest/modules_112.go | 17 +
 .../go/packages/packagestest/modules_113.go | 21 +
 vendor/golang.org/x/tools/imports/fix.go | 1259 -
 vendor/golang.org/x/tools/imports/imports.go | 315 -
 vendor/golang.org/x/tools/imports/mkindex.go | 173 -
 vendor/golang.org/x/tools/imports/mkstdlib.go | 112 -
 vendor/golang.org/x/tools/imports/mod.go | 351 -
 .../golang.org/x/tools/imports/sortimports.go | 230 -
 vendor/golang.org/x/tools/imports/zstdlib.go | 10302 ---
 .../x/tools/internal/module/module.go | 540 -
 .../golang.org/x/tools/internal/span/parse.go | 100 +
 .../golang.org/x/tools/internal/span/span.go | 279 +
 .../golang.org/x/tools/internal/span/token.go | 138 +
 .../x/tools/internal/span/token111.go | 39 +
 .../x/tools/internal/span/token112.go | 16 +
 .../golang.org/x/tools/internal/span/uri.go | 126 +
 .../golang.org/x/tools/internal/span/utf16.go | 87 +
 vendor/gopkg.in/fsnotify.v1/.editorconfig | 5 +
 vendor/gopkg.in/fsnotify.v1/.gitignore | 6 +
 vendor/gopkg.in/fsnotify.v1/.travis.yml | 30 +
 vendor/gopkg.in/fsnotify.v1/AUTHORS | 52 +
 vendor/gopkg.in/fsnotify.v1/CHANGELOG.md | 317 +
 vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77 +
 vendor/gopkg.in/fsnotify.v1/LICENSE | 28 +
 vendor/gopkg.in/fsnotify.v1/README.md | 79 +
 vendor/gopkg.in/fsnotify.v1/fen.go | 37 +
 vendor/gopkg.in/fsnotify.v1/fsnotify.go | 66 +
 vendor/gopkg.in/fsnotify.v1/inotify.go | 337 +
 vendor/gopkg.in/fsnotify.v1/inotify_poller.go | 187 +
 vendor/gopkg.in/fsnotify.v1/kqueue.go | 521 +
 vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 +
 .../gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 +
 vendor/gopkg.in/fsnotify.v1/windows.go | 561 +
 vendor/gopkg.in/tomb.v1/LICENSE | 29 +
 vendor/gopkg.in/tomb.v1/README.md | 4 +
 vendor/gopkg.in/tomb.v1/tomb.go | 176 +
 .../api/core/v1/annotation_key_constants.go | 100 -
 vendor/k8s.io/api/core/v1/doc.go | 21 -
 vendor/k8s.io/api/core/v1/generated.pb.go | 52455 ----------------
 vendor/k8s.io/api/core/v1/generated.proto | 4790 --
 vendor/k8s.io/api/core/v1/objectreference.go | 33 -
 vendor/k8s.io/api/core/v1/register.go | 99 -
 vendor/k8s.io/api/core/v1/resource.go | 56 -
 vendor/k8s.io/api/core/v1/taint.go | 33 -
 vendor/k8s.io/api/core/v1/toleration.go | 56 -
 vendor/k8s.io/api/core/v1/types.go | 5361 --
 .../core/v1/types_swagger_doc_generated.go | 2346 -
 .../api/core/v1/zz_generated.deepcopy.go | 5430 --
 vendor/k8s.io/gengo/LICENSE | 202 -
 vendor/k8s.io/gengo/args/args.go | 199 -
 .../gengo/generator/default_generator.go | 62 -
 .../k8s.io/gengo/generator/default_package.go | 72 -
 vendor/k8s.io/gengo/generator/doc.go | 31 -
 .../k8s.io/gengo/generator/error_tracker.go | 50 -
 vendor/k8s.io/gengo/generator/execute.go | 312 -
 vendor/k8s.io/gengo/generator/generator.go | 219 -
 .../k8s.io/gengo/generator/import_tracker.go | 64 -
 .../k8s.io/gengo/generator/snippet_writer.go | 154 -
 vendor/k8s.io/gengo/namer/doc.go | 31 -
 vendor/k8s.io/gengo/namer/import_tracker.go | 112 -
 vendor/k8s.io/gengo/namer/namer.go | 383 -
 vendor/k8s.io/gengo/namer/order.go | 69 -
 vendor/k8s.io/gengo/namer/plural_namer.go | 120 -
 vendor/k8s.io/gengo/parser/doc.go | 19 -
 vendor/k8s.io/gengo/parser/parse.go | 813 -
 vendor/k8s.io/gengo/types/comments.go | 82 -
 vendor/k8s.io/gengo/types/doc.go | 19 -
 vendor/k8s.io/gengo/types/flatten.go | 57 -
 vendor/k8s.io/gengo/types/types.go | 499 -
 vendor/modules.txt | 106 +-
 .../ghodss => sigs.k8s.io}/yaml/.gitignore | 0
 vendor/sigs.k8s.io/yaml/.travis.yml | 14 +
 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31 +
 .../ghodss => sigs.k8s.io}/yaml/LICENSE | 0
 vendor/sigs.k8s.io/yaml/OWNERS | 25 +
 .../ghodss => sigs.k8s.io}/yaml/README.md | 0
 vendor/sigs.k8s.io/yaml/RELEASE.md | 9 +
 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17 +
 vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3 +
 .../ghodss => sigs.k8s.io}/yaml/fields.go | 1 +
 .../ghodss => sigs.k8s.io}/yaml/yaml.go | 68 +-
 vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 +
 680 files changed, 439954 insertions(+), 96995 deletions(-)
 delete mode 100644 vendor/github.com/ghodss/yaml/.travis.yml
 create mode 100644 vendor/github.com/hpcloud/tail/.gitignore
 create mode 100644 vendor/github.com/hpcloud/tail/.travis.yml
 create mode 100644 vendor/github.com/hpcloud/tail/CHANGES.md
 create mode 100644 vendor/github.com/hpcloud/tail/Dockerfile
 create mode 100644 vendor/github.com/hpcloud/tail/LICENSE.txt
 create mode 100644 vendor/github.com/hpcloud/tail/Makefile
 create mode 100644 vendor/github.com/hpcloud/tail/README.md
 create mode 100644 vendor/github.com/hpcloud/tail/appveyor.yml
 create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/Licence
 create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go
 create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/memory.go
 create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/storage.go
 create mode 100644 vendor/github.com/hpcloud/tail/tail.go
 create mode 100644 vendor/github.com/hpcloud/tail/tail_posix.go
 create mode 100644 vendor/github.com/hpcloud/tail/tail_windows.go
 create mode 100644 vendor/github.com/hpcloud/tail/util/util.go
 create mode 100644 vendor/github.com/hpcloud/tail/watch/filechanges.go
 create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify.go
 create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify_tracker.go
 create mode 100644 vendor/github.com/hpcloud/tail/watch/polling.go
 create mode 100644 vendor/github.com/hpcloud/tail/watch/watch.go
 create mode 100644 vendor/github.com/hpcloud/tail/winfile/winfile.go
 create mode 100644 vendor/github.com/onsi/ginkgo/.gitignore
 create mode 100644 vendor/github.com/onsi/ginkgo/.travis.yml
 create mode 100644 vendor/github.com/onsi/ginkgo/CHANGELOG.md
 create mode 100644 vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
 create mode 100644 vendor/github.com/onsi/ginkgo/LICENSE
 create mode 100644 vendor/github.com/onsi/ginkgo/README.md
 create mode 100644 vendor/github.com/onsi/ginkgo/RELEASING.md
 create mode 100644 vendor/github.com/onsi/ginkgo/config/config.go
 create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/failer/failer.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/server.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/spec.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/specs.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/suite/suite.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
 create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/writer.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/reporter.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
 create mode 100644 vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
 create mode 100644 vendor/github.com/onsi/ginkgo/types/code_location.go
 create mode 100644 vendor/github.com/onsi/ginkgo/types/synchronization.go
 create mode 100644 vendor/github.com/onsi/ginkgo/types/types.go
 create mode 100644 vendor/github.com/onsi/gomega/.gitignore
 create mode 100644 vendor/github.com/onsi/gomega/.travis.yml
 create mode 100644 vendor/github.com/onsi/gomega/CHANGELOG.md
 create mode 100644 vendor/github.com/onsi/gomega/CONTRIBUTING.md
 create mode 100644 vendor/github.com/onsi/gomega/LICENSE
 create mode 100644 vendor/github.com/onsi/gomega/Makefile
 create mode 100644 vendor/github.com/onsi/gomega/README.md
 create mode 100644 vendor/github.com/onsi/gomega/RELEASING.md
 create mode 100644 vendor/github.com/onsi/gomega/format/format.go
 create mode 100644 vendor/github.com/onsi/gomega/go.mod
 create mode 100644 vendor/github.com/onsi/gomega/go.sum
 create mode 100644 vendor/github.com/onsi/gomega/gomega_dsl.go
 create mode 100644 vendor/github.com/onsi/gomega/internal/assertion/assertion.go
 create mode 100644 vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
 create mode 100644 vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/and.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/attributes_slice.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_directory.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_identical_to.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/consist_of.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/equal_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/not.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/or.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/panic_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/receive_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/type_support.go
 create mode 100644 vendor/github.com/onsi/gomega/matchers/with_transform.go
 create mode 100644 vendor/github.com/onsi/gomega/types/types.go
 delete mode 100644 vendor/github.com/pkg/errors/.gitignore
 delete mode 100644 vendor/github.com/pkg/errors/.travis.yml
 delete mode 100644 vendor/github.com/pkg/errors/LICENSE
 delete mode 100644 vendor/github.com/pkg/errors/README.md
 delete mode 100644 vendor/github.com/pkg/errors/appveyor.yml
 delete mode 100644 vendor/github.com/pkg/errors/errors.go
 delete mode 100644 vendor/github.com/pkg/errors/stack.go
 delete mode 100644 vendor/github.com/spf13/afero/.travis.yml
 delete mode 100644 vendor/github.com/spf13/afero/LICENSE.txt
 delete mode 100644 vendor/github.com/spf13/afero/README.md
 delete mode 100644 vendor/github.com/spf13/afero/afero.go
 delete mode 100644 vendor/github.com/spf13/afero/appveyor.yml
 delete mode 100644 vendor/github.com/spf13/afero/basepath.go
 delete mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go
 delete mode 100644 vendor/github.com/spf13/afero/const_bsds.go
 delete mode 100644 vendor/github.com/spf13/afero/const_win_unix.go
 delete mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go
 delete mode 100644 vendor/github.com/spf13/afero/go.mod
 delete mode 100644 vendor/github.com/spf13/afero/go.sum
 delete mode 100644 vendor/github.com/spf13/afero/httpFs.go
 delete mode 100644 vendor/github.com/spf13/afero/ioutil.go
 delete mode 100644 vendor/github.com/spf13/afero/lstater.go
 delete mode 100644 vendor/github.com/spf13/afero/match.go
 delete mode 100644 vendor/github.com/spf13/afero/mem/dir.go
 delete mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go
 delete mode 100644 vendor/github.com/spf13/afero/mem/file.go
 delete mode 100644 vendor/github.com/spf13/afero/memmap.go
 delete mode 100644 vendor/github.com/spf13/afero/os.go
 delete mode 100644 vendor/github.com/spf13/afero/path.go
 delete mode 100644 vendor/github.com/spf13/afero/readonlyfs.go
 delete mode 100644 vendor/github.com/spf13/afero/regexpfs.go
 delete mode 100644 vendor/github.com/spf13/afero/unionFile.go
 delete mode 100644 vendor/github.com/spf13/afero/util.go
 create mode 100644 vendor/golang.org/x/net/html/atom/atom.go
 create mode 100644 vendor/golang.org/x/net/html/atom/gen.go
 create mode 100644 vendor/golang.org/x/net/html/atom/table.go
 create mode 100644 vendor/golang.org/x/net/html/charset/charset.go
 create mode 100644 vendor/golang.org/x/net/html/const.go
 create mode 100644 vendor/golang.org/x/net/html/doc.go
 create mode 100644 vendor/golang.org/x/net/html/doctype.go
 create mode 100644 vendor/golang.org/x/net/html/entity.go
 create mode 100644 vendor/golang.org/x/net/html/escape.go
 create mode 100644 vendor/golang.org/x/net/html/foreign.go
 create mode 100644 vendor/golang.org/x/net/html/node.go
 create mode 100644 vendor/golang.org/x/net/html/parse.go
 create mode 100644 vendor/golang.org/x/net/html/render.go
 create mode 100644 vendor/golang.org/x/net/html/token.go
 rename vendor/golang.org/x/net/idna/{idna.go => idna10.0.0.go} (99%)
 create mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go
 rename vendor/golang.org/x/net/idna/{tables.go => tables10.0.0.go} (99%)
 create mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go
 create mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go
 create mode 100644 vendor/golang.org/x/sys/AUTHORS
 create mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS
 create mode 100644 vendor/golang.org/x/sys/LICENSE
 create mode 100644 vendor/golang.org/x/sys/PATENTS
 create mode 100644 vendor/golang.org/x/sys/unix/.gitignore
 create mode 100644 vendor/golang.org/x/sys/unix/README.md
 create mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/aliases.go
 create mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_386.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
 create mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/constants.go
 create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go
 create mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go
 create mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/dirent.go
 create mode 100644 vendor/golang.org/x/sys/unix/endian_big.go
 create mode 100644 vendor/golang.org/x/sys/unix/endian_little.go
 create mode 100644 vendor/golang.org/x/sys/unix/env_unix.go
 create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/fcntl.go
 create mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go
 create mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
 create mode 100644 vendor/golang.org/x/sys/unix/gccgo.go
 create mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c
 create mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ioctl.go
 create mode 100644 vendor/golang.org/x/sys/unix/mkall.sh
 create mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go
 create mode 100644 vendor/golang.org/x/sys/unix/mkerrors.sh
 create mode 100644 vendor/golang.org/x/sys/unix/mkpost.go
 create mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go
 create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go
 create mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go
 create mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go
 create mode 100644 vendor/golang.org/x/sys/unix/pledge_openbsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/race.go
 create mode 100644 vendor/golang.org/x/sys/unix/race0.go
 create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go
 create mode 100644 vendor/golang.org/x/sys/unix/str.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go
 create mode 100644 vendor/golang.org/x/sys/unix/timestruct.go
 create mode 100644 vendor/golang.org/x/sys/unix/types_aix.go
 create mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go
 create mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go
 create mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go
 create mode 100644 vendor/golang.org/x/sys/unix/unveil_openbsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zptrace386_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/zptracearm_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/zptracemips_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/zptracemipsle_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
 create mode 100644 vendor/golang.org/x/text/encoding/charmap/charmap.go
 create mode 100644 vendor/golang.org/x/text/encoding/charmap/maketables.go
 create mode 100644 vendor/golang.org/x/text/encoding/charmap/tables.go
 create mode 100644 vendor/golang.org/x/text/encoding/encoding.go
 create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/gen.go
 create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go
 create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/map.go
 create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/tables.go
 create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/gen.go
 create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
 create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/mib.go
 create mode 100644 vendor/golang.org/x/text/encoding/internal/internal.go
 create mode 100644 vendor/golang.org/x/text/encoding/japanese/all.go
 create mode 100644 vendor/golang.org/x/text/encoding/japanese/eucjp.go
 create mode 100644 vendor/golang.org/x/text/encoding/japanese/iso2022jp.go
vendor/golang.org/x/text/encoding/japanese/maketables.go create mode 100644 vendor/golang.org/x/text/encoding/japanese/shiftjis.go create mode 100644 vendor/golang.org/x/text/encoding/japanese/tables.go create mode 100644 vendor/golang.org/x/text/encoding/korean/euckr.go create mode 100644 vendor/golang.org/x/text/encoding/korean/maketables.go create mode 100644 vendor/golang.org/x/text/encoding/korean/tables.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/all.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/tables.go create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/big5.go create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/tables.go create mode 100644 vendor/golang.org/x/text/encoding/unicode/override.go create mode 100644 vendor/golang.org/x/text/encoding/unicode/unicode.go create mode 100644 vendor/golang.org/x/text/internal/language/common.go create mode 100644 vendor/golang.org/x/text/internal/language/compact.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/compact.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/gen.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_index.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_parents.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/language.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/parents.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/tables.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/tags.go create mode 100644 vendor/golang.org/x/text/internal/language/compose.go create mode 100644 vendor/golang.org/x/text/internal/language/coverage.go create mode 100644 vendor/golang.org/x/text/internal/language/gen.go create mode 100644 vendor/golang.org/x/text/internal/language/gen_common.go create mode 100644 vendor/golang.org/x/text/internal/language/language.go create mode 100644 vendor/golang.org/x/text/internal/language/lookup.go create mode 100644 vendor/golang.org/x/text/internal/language/match.go create mode 100644 vendor/golang.org/x/text/internal/language/parse.go create mode 100644 vendor/golang.org/x/text/internal/language/tables.go create mode 100644 vendor/golang.org/x/text/internal/language/tags.go create mode 100644 vendor/golang.org/x/text/internal/tag/tag.go create mode 100644 vendor/golang.org/x/text/internal/utf8internal/utf8internal.go create mode 100644 vendor/golang.org/x/text/language/coverage.go create mode 100644 vendor/golang.org/x/text/language/doc.go create mode 100644 vendor/golang.org/x/text/language/gen.go create mode 100644 vendor/golang.org/x/text/language/go1_1.go create mode 100644 vendor/golang.org/x/text/language/go1_2.go create mode 100644 vendor/golang.org/x/text/language/language.go create mode 100644 vendor/golang.org/x/text/language/match.go create mode 100644 vendor/golang.org/x/text/language/parse.go create mode 100644 vendor/golang.org/x/text/language/tables.go create mode 100644 vendor/golang.org/x/text/language/tags.go create mode 100644 vendor/golang.org/x/text/runes/cond.go 
create mode 100644 vendor/golang.org/x/text/runes/runes.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 vendor/golang.org/x/tools/go/expect/expect.go create mode 100644 vendor/golang.org/x/tools/go/expect/extract.go delete mode 100644 vendor/golang.org/x/tools/go/internal/cgo/cgo.go delete mode 100644 vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go delete mode 100644 vendor/golang.org/x/tools/go/packages/golist_fallback.go delete mode 100644 vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go create mode 100644 vendor/golang.org/x/tools/go/packages/packagestest/expect.go create mode 100644 vendor/golang.org/x/tools/go/packages/packagestest/export.go create mode 100644 vendor/golang.org/x/tools/go/packages/packagestest/gopath.go create mode 100644 vendor/golang.org/x/tools/go/packages/packagestest/modules.go create mode 100644 vendor/golang.org/x/tools/go/packages/packagestest/modules_111.go create mode 100644 vendor/golang.org/x/tools/go/packages/packagestest/modules_112.go create mode 100644 vendor/golang.org/x/tools/go/packages/packagestest/modules_113.go delete mode 100644 vendor/golang.org/x/tools/imports/fix.go delete mode 100644 vendor/golang.org/x/tools/imports/imports.go delete mode 100644 vendor/golang.org/x/tools/imports/mkindex.go delete mode 100644 vendor/golang.org/x/tools/imports/mkstdlib.go delete mode 100644 vendor/golang.org/x/tools/imports/mod.go delete mode 100644 vendor/golang.org/x/tools/imports/sortimports.go delete mode 100644 vendor/golang.org/x/tools/imports/zstdlib.go delete mode 100644 vendor/golang.org/x/tools/internal/module/module.go create mode 100644 vendor/golang.org/x/tools/internal/span/parse.go create mode 100644 vendor/golang.org/x/tools/internal/span/span.go create mode 100644 vendor/golang.org/x/tools/internal/span/token.go create mode 100644 vendor/golang.org/x/tools/internal/span/token111.go create mode 100644 vendor/golang.org/x/tools/internal/span/token112.go create mode 100644 vendor/golang.org/x/tools/internal/span/uri.go create mode 100644 vendor/golang.org/x/tools/internal/span/utf16.go create mode 100644 vendor/gopkg.in/fsnotify.v1/.editorconfig create mode 100644 vendor/gopkg.in/fsnotify.v1/.gitignore create mode 100644 vendor/gopkg.in/fsnotify.v1/.travis.yml create mode 100644 vendor/gopkg.in/fsnotify.v1/AUTHORS create mode 100644 vendor/gopkg.in/fsnotify.v1/CHANGELOG.md create mode 100644 vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/fsnotify.v1/LICENSE create mode 100644 vendor/gopkg.in/fsnotify.v1/README.md create mode 100644 vendor/gopkg.in/fsnotify.v1/fen.go create mode 100644 vendor/gopkg.in/fsnotify.v1/fsnotify.go create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify.go create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify_poller.go create mode 100644 vendor/gopkg.in/fsnotify.v1/kqueue.go create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go create mode 100644 vendor/gopkg.in/fsnotify.v1/windows.go create mode 100644 vendor/gopkg.in/tomb.v1/LICENSE create mode 100644 vendor/gopkg.in/tomb.v1/README.md 
create mode 100644 vendor/gopkg.in/tomb.v1/tomb.go delete mode 100644 vendor/k8s.io/api/core/v1/annotation_key_constants.go delete mode 100644 vendor/k8s.io/api/core/v1/doc.go delete mode 100644 vendor/k8s.io/api/core/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/core/v1/generated.proto delete mode 100644 vendor/k8s.io/api/core/v1/objectreference.go delete mode 100644 vendor/k8s.io/api/core/v1/register.go delete mode 100644 vendor/k8s.io/api/core/v1/resource.go delete mode 100644 vendor/k8s.io/api/core/v1/taint.go delete mode 100644 vendor/k8s.io/api/core/v1/toleration.go delete mode 100644 vendor/k8s.io/api/core/v1/types.go delete mode 100644 vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/gengo/LICENSE delete mode 100644 vendor/k8s.io/gengo/args/args.go delete mode 100644 vendor/k8s.io/gengo/generator/default_generator.go delete mode 100644 vendor/k8s.io/gengo/generator/default_package.go delete mode 100644 vendor/k8s.io/gengo/generator/doc.go delete mode 100644 vendor/k8s.io/gengo/generator/error_tracker.go delete mode 100644 vendor/k8s.io/gengo/generator/execute.go delete mode 100644 vendor/k8s.io/gengo/generator/generator.go delete mode 100644 vendor/k8s.io/gengo/generator/import_tracker.go delete mode 100644 vendor/k8s.io/gengo/generator/snippet_writer.go delete mode 100644 vendor/k8s.io/gengo/namer/doc.go delete mode 100644 vendor/k8s.io/gengo/namer/import_tracker.go delete mode 100644 vendor/k8s.io/gengo/namer/namer.go delete mode 100644 vendor/k8s.io/gengo/namer/order.go delete mode 100644 vendor/k8s.io/gengo/namer/plural_namer.go delete mode 100644 vendor/k8s.io/gengo/parser/doc.go delete mode 100644 vendor/k8s.io/gengo/parser/parse.go delete mode 100644 vendor/k8s.io/gengo/types/comments.go delete mode 100644 vendor/k8s.io/gengo/types/doc.go delete mode 100644 vendor/k8s.io/gengo/types/flatten.go delete mode 100644 vendor/k8s.io/gengo/types/types.go rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/.gitignore (100%) create mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml create mode 100644 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/LICENSE (100%) create mode 100644 vendor/sigs.k8s.io/yaml/OWNERS rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/README.md (100%) create mode 100644 vendor/sigs.k8s.io/yaml/RELEASE.md create mode 100644 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS create mode 100644 vendor/sigs.k8s.io/yaml/code-of-conduct.md rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/fields.go (99%) rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/yaml.go (77%) create mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go diff --git a/Gopkg.lock b/Gopkg.lock index b8d5050eb..3a6ff1d94 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,14 +1,6 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
-[[projects]] - digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda" - name = "github.com/ghodss/yaml" - packages = ["."] - pruneopts = "UT" - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" - [[projects]] digest = "1:5b33a8ff87888677489ba612c8aa1bf06500b087f1397a4732b7a0298f6d5f8a" name = "github.com/gobuffalo/envy" @@ -36,6 +28,20 @@ pruneopts = "UT" revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" +[[projects]] + digest = "1:a1038ef593beb4771c8f0f9c26e8b00410acd800af5c6864651d9bf160ea1813" + name = "github.com/hpcloud/tail" + packages = [ + ".", + "ratelimiter", + "util", + "watch", + "winfile", + ] + pruneopts = "UT" + revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5" + version = "v1.0.0" + [[projects]] digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" name = "github.com/inconshreveable/mousetrap" @@ -61,12 +67,52 @@ version = "v1.0.4" [[projects]] - digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" - name = "github.com/pkg/errors" - packages = ["."] + digest = "1:e9c3bb68a6c9470302b8046d4647e0612a2ea6037b9c6a47de60c0a90db504f8" + name = "github.com/onsi/ginkgo" + packages = [ + ".", + "config", + "internal/codelocation", + "internal/containernode", + "internal/failer", + "internal/leafnodes", + "internal/remote", + "internal/spec", + "internal/spec_iterator", + "internal/specrunner", + "internal/suite", + "internal/testingtproxy", + "internal/writer", + "reporters", + "reporters/stenographer", + "reporters/stenographer/support/go-colorable", + "reporters/stenographer/support/go-isatty", + "types", + ] pruneopts = "UT" - revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" - version = "v0.8.1" + revision = "eea6ad008b96acdaa524f5b409513bf062b500ad" + version = "v1.8.0" + +[[projects]] + digest = "1:fe167e8f858cbfc10c721f2000f2446140800bb53fa83c088e03d191275611a4" + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types", + ] + pruneopts = "UT" + revision = "90e289841c1ed79b7a598a7cd9959750cb5e89e2" + version = "v1.5.0" [[projects]] digest = "1:e09ada96a5a41deda4748b1659cc8953961799e798aea557257b56baee4ecaf3" @@ -80,17 +126,6 @@ revision = "438578804ca6f31be148c27683afc419ce47c06e" version = "v1.2.2" -[[projects]] - digest = "1:bb495ec276ab82d3dd08504bbc0594a65de8c3b22c6f2aaa92d05b73fbf3a82e" - name = "github.com/spf13/afero" - packages = [ - ".", - "mem", - ] - pruneopts = "UT" - revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424" - version = "v1.2.2" - [[projects]] digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939" name = "github.com/spf13/cobra" @@ -109,9 +144,12 @@ [[projects]] branch = "master" - digest = "1:c8879109b3d04b0a15c587821781276273901bcb71da902757eac904b271f672" + digest = "1:9455e1cd6c5686866edea0951b5328389645452fde9b07b7f980d80d04785c9d" name = "golang.org/x/net" packages = [ + "html", + "html/atom", + "html/charset", "http/httpguts", "http2", "http2/hpack", @@ -121,17 +159,37 @@ revision = "b630fd6fe46bcfc98f989005d8b8ec1400e60a6e" [[projects]] - digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" + branch = "master" + digest = "1:1ddfda089b8a15d92242412aabc1107b8c7a87c609c09c5b441c81dbcca0ab79" + name = 
"golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "a43fa875dd822b81eb6d2ad538bc1f4caba169bd" + +[[projects]] + digest = "1:436b24586f8fee329e0dd65fd67c817681420cda1d7f934345c13fe78c212a73" name = "golang.org/x/text" packages = [ "collate", "collate/build", + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", "internal/colltab", "internal/gen", "internal/tag", "internal/triegen", "internal/ucd", + "internal/utf8internal", "language", + "runes", "secure/bidirule", "transform", "unicode/bidi", @@ -145,24 +203,34 @@ [[projects]] branch = "master" - digest = "1:59b401940f79c311efa01c10e2fa8e78522a978538127f369fb39690a03c3eb4" + digest = "1:a753d65d52015a1a59f7efcbba07c02f6a45abb4c554416191f37527a22fcef0" name = "golang.org/x/tools" packages = [ "go/ast/astutil", + "go/expect", "go/gcexportdata", "go/internal/gcimporter", "go/internal/packagesdriver", "go/packages", + "go/packages/packagestest", "go/types/typeutil", - "imports", "internal/fastwalk", "internal/gopathwalk", - "internal/module", "internal/semver", + "internal/span", ] pruneopts = "UT" revision = "8a44e74612bcf51f0d4407df3d3a8377cb99c2d8" +[[projects]] + digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" + name = "gopkg.in/fsnotify.v1" + packages = ["."] + pruneopts = "UT" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + source = "https://github.com/fsnotify/fsnotify.git" + version = "v1.4.7" + [[projects]] digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" name = "gopkg.in/inf.v0" @@ -171,6 +239,14 @@ revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" +[[projects]] + branch = "v1" + digest = "1:0caa92e17bc0b65a98c63e5bc76a9e844cd5e56493f8fdbb28fad101a16254d9" + name = "gopkg.in/tomb.v1" + packages = ["."] + pruneopts = "UT" + revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" + [[projects]] digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" name = "gopkg.in/yaml.v2" @@ -180,11 +256,10 @@ version = "v2.2.2" [[projects]] - digest = "1:4304137d46b791f0d5280f65728be893c00dd2511f78a8a6505072271480ed75" + digest = "1:8e939f902f5481b3c11d317274d0be331f284bf2a52ef3e4863d96e4499f3a90" name = "k8s.io/api" packages = [ "admissionregistration/v1beta1", - "core/v1", "rbac/v1", ] pruneopts = "UT" @@ -232,20 +307,6 @@ revision = "86fb29eff6288413d76bd8506874fddd9fccdff0" version = "kubernetes-1.13.4" -[[projects]] - branch = "master" - digest = "1:28a32d7ac148ae6f81d5903f6455ae6530c19ba48708d0366263d61679945b13" - name = "k8s.io/gengo" - packages = [ - "args", - "generator", - "namer", - "parser", - "types", - ] - pruneopts = "UT" - revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985" - [[projects]] digest = "1:72fd56341405f53c745377e0ebc4abeff87f1a048e0eea6568a20212650f5a82" name = "k8s.io/klog" @@ -254,31 +315,30 @@ revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0" version = "v0.2.0" +[[projects]] + digest = "1:7719608fe0b52a4ece56c2dde37bedd95b938677d1ab0f84b8a7852e4c59f849" + name = "sigs.k8s.io/yaml" + packages = ["."] + pruneopts = "UT" + revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" + version = "v1.1.0" + [solve-meta] analyzer-name = "dep" analyzer-version = 1 input-imports = [ - "github.com/ghodss/yaml", "github.com/markbates/inflect", - "github.com/pkg/errors", - "github.com/spf13/afero", + 
"github.com/onsi/ginkgo", + "github.com/onsi/gomega", "github.com/spf13/cobra", - "github.com/spf13/pflag", + "golang.org/x/tools/go/packages", + "golang.org/x/tools/go/packages/packagestest", "k8s.io/api/admissionregistration/v1beta1", - "k8s.io/api/core/v1", "k8s.io/api/rbac/v1", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/intstr", - "k8s.io/apimachinery/pkg/util/sets", - "k8s.io/gengo/args", - "k8s.io/gengo/generator", - "k8s.io/gengo/namer", - "k8s.io/gengo/parser", - "k8s.io/gengo/types", + "sigs.k8s.io/yaml", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index aea49822d..a733f9b9a 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -14,28 +14,13 @@ ignored = [ name="k8s.io/apimachinery" version="kubernetes-1.13.4" -[[constraint]] - name = "github.com/spf13/afero" - version = "v1.1.1" - [[constraint]] name = "github.com/spf13/cobra" version = "v0.0.3" - + [[constraint]] - name = "github.com/spf13/pflag" - version = "v1.0.1" - -[[constraint]] - name = "github.com/ghodss/yaml" - version = "1.0.0" - -# TODO(droot): pin the following deps to a version when they are tagged. -#[[constraint]] -# name = "github.com/go-openapi/spec" -# -#[[constraint]] -# name = "sigs.k8s.io/testing_frameworks" + name = "sigs.k8s.io/yaml" + version = "v1.1.0" # For dependency below: Refer to issue https://github.com/golang/dep/issues/1799 [[override]] diff --git a/go.mod b/go.mod index 53fc39f6b..2c1c6cf58 100644 --- a/go.mod +++ b/go.mod @@ -3,30 +3,28 @@ module sigs.k8s.io/controller-tools go 1.12 require ( - github.com/ghodss/yaml v1.0.0 github.com/gobuffalo/envy v1.6.15 // indirect github.com/gogo/protobuf v1.2.1 // indirect github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/json-iterator/go v1.1.6 // indirect - github.com/kr/pty v1.1.4 // indirect github.com/markbates/inflect v1.0.4 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.1 // indirect - github.com/pkg/errors v0.8.1 + github.com/onsi/ginkgo v1.8.0 + github.com/onsi/gomega v1.5.0 github.com/rogpeppe/go-internal v1.2.2 // indirect - github.com/spf13/afero v1.2.2 github.com/spf13/cobra v0.0.3 - github.com/spf13/pflag v1.0.3 - github.com/stretchr/objx v0.1.1 // indirect - golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 // indirect - golang.org/x/tools v0.0.0-20190213015956-f7e1b50d2251 // indirect + github.com/spf13/pflag v1.0.3 // indirect + golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 // indirect + golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 // indirect + golang.org/x/text v0.3.2 // indirect + golang.org/x/tools v0.0.0-20190501045030-23463209683d gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.2.2 // indirect k8s.io/api v0.0.0-20190222213804-5cb15d344471 k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 - k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 k8s.io/klog v0.2.0 // indirect - sigs.k8s.io/yaml v1.1.0 // indirect + sigs.k8s.io/yaml v1.1.0 ) diff --git a/go.sum b/go.sum index 5066bf73a..5a484c8ea 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,19 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew 
v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/envy v1.6.15 h1:OsV5vOpHYUpP7ZLS6sem1y40/lNX1BZj+ynMiRi21lQ= github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= @@ -23,7 +25,6 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= @@ -32,41 +33,56 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.1 h1:T/TUR2vJ/xlMEeIdm+mHat3xThjLEvLrp7AsD417dI0= -github.com/rogpeppe/go-internal v1.2.1/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2 h1:J7U/N7eRtzjhs26d6GqMh2HBuXP8/Z64Densiiieafo= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M= -github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= -golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 h1:cGjJzUd8RgBw428LXP65YXni0aiGNA4Bl+ls8SmLOm8= +golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190213015956-f7e1b50d2251 h1:xuKXiu1bbHcCT36Y7Mco01xC52B0CgM7iAOtnaalwrI= -golang.org/x/tools v0.0.0-20190213015956-f7e1b50d2251/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190501045030-23463209683d h1:D7DVZUZEUgsSIDTivnUtVeGfN5AvhDIKtdIZAqx0ieE= +golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE= @@ -75,8 +91,6 @@ k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 h1:JfFtjaElBIg k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 h1:UYfHH+KEF88OTg+GojQUwFTNxbxwmoktLwutUzR0GPg= k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c= k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc0..000000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/vendor/github.com/hpcloud/tail/.gitignore b/vendor/github.com/hpcloud/tail/.gitignore new file mode 100644 index 000000000..6d9953c3c --- /dev/null +++ b/vendor/github.com/hpcloud/tail/.gitignore @@ -0,0 +1,3 @@ +.test +.go + diff --git a/vendor/github.com/hpcloud/tail/.travis.yml b/vendor/github.com/hpcloud/tail/.travis.yml new file mode 100644 index 000000000..9cf8bb7fc --- /dev/null +++ 
b/vendor/github.com/hpcloud/tail/.travis.yml @@ -0,0 +1,18 @@ +language: go + +script: + - go test -race -v ./... + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get gopkg.in/fsnotify.v1 + - go get gopkg.in/tomb.v1 diff --git a/vendor/github.com/hpcloud/tail/CHANGES.md b/vendor/github.com/hpcloud/tail/CHANGES.md new file mode 100644 index 000000000..422790c07 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/CHANGES.md @@ -0,0 +1,63 @@ +# API v1 (gopkg.in/hpcloud/tail.v1) + +## April, 2016 + +* Migrated to godep, as depman is not longer supported +* Introduced golang vendoring feature +* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file + +## July, 2015 + +* Fix inotify watcher leak; remove `Cleanup` (#51) + +# API v0 (gopkg.in/hpcloud/tail.v0) + +## June, 2015 + +* Don't return partial lines (PR #40) +* Use stable version of fsnotify (#46) + +## July, 2014 + +* Fix tail for Windows (PR #36) + +## May, 2014 + +* Improved rate limiting using leaky bucket (PR #29) +* Fix odd line splitting (PR #30) + +## Apr, 2014 + +* LimitRate now discards read buffer (PR #28) +* allow reading of longer lines if MaxLineSize is unset (PR #24) +* updated deps.json to latest fsnotify (441bbc86b1) + +## Feb, 2014 + +* added `Config.Logger` to suppress library logging + +## Nov, 2013 + +* add Cleanup to remove leaky inotify watches (PR #20) + +## Aug, 2013 + +* redesigned Location field (PR #12) +* add tail.Tell (PR #14) + +## July, 2013 + +* Rate limiting (PR #10) + +## May, 2013 + +* Detect file deletions/renames in polling file watcher (PR #1) +* Detect file truncation +* Fix potential race condition when reopening the file (issue 5) +* Fix potential blocking of `tail.Stop` (issue 4) +* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop +* Support Follow=false + +## Feb, 2013 + +* Initial open source release diff --git a/vendor/github.com/hpcloud/tail/Dockerfile b/vendor/github.com/hpcloud/tail/Dockerfile new file mode 100644 index 000000000..cd297b940 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/Dockerfile @@ -0,0 +1,19 @@ +FROM golang + +RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/ +ADD . $GOPATH/src/github.com/hpcloud/tail/ + +# expecting to fetch dependencies successfully. +RUN go get -v github.com/hpcloud/tail + +# expecting to run the test successfully. 
+RUN go test -v github.com/hpcloud/tail + +# expecting to install successfully +RUN go install -v github.com/hpcloud/tail +RUN go install -v github.com/hpcloud/tail/cmd/gotail + +RUN $GOPATH/bin/gotail -h || true + +ENV PATH $GOPATH/bin:$PATH +CMD ["gotail"] diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/hpcloud/tail/LICENSE.txt new file mode 100644 index 000000000..818d802a5 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/LICENSE.txt @@ -0,0 +1,21 @@ +# The MIT License (MIT) + +# © Copyright 2015 Hewlett Packard Enterprise Development LP +Copyright (c) 2014 ActiveState + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hpcloud/tail/Makefile b/vendor/github.com/hpcloud/tail/Makefile new file mode 100644 index 000000000..6591b24fc --- /dev/null +++ b/vendor/github.com/hpcloud/tail/Makefile @@ -0,0 +1,11 @@ +default: test + +test: *.go + go test -v -race ./... + +fmt: + gofmt -w . + +# Run the test in an isolated environment. +fulltest: + docker build -t hpcloud/tail . diff --git a/vendor/github.com/hpcloud/tail/README.md b/vendor/github.com/hpcloud/tail/README.md new file mode 100644 index 000000000..fb7fbc26c --- /dev/null +++ b/vendor/github.com/hpcloud/tail/README.md @@ -0,0 +1,28 @@ +[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail) +[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail) + +# Go package for tail-ing files + +A Go package striving to emulate the features of the BSD `tail` program. + +```Go +t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true}) +for line := range t.Lines { + fmt.Println(line.Text) +} +``` + +See [API documentation](http://godoc.org/github.com/hpcloud/tail). + +## Log rotation + +Tail comes with full support for truncation/move detection as it is +designed to work with log rotation tools. + +## Installing + + go get github.com/hpcloud/tail/... + +## Windows support + +This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support. diff --git a/vendor/github.com/hpcloud/tail/appveyor.yml b/vendor/github.com/hpcloud/tail/appveyor.yml new file mode 100644 index 000000000..d370055b6 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/appveyor.yml @@ -0,0 +1,11 @@ +version: 0.{build} +skip_tags: true +cache: C:\Users\appveyor\AppData\Local\NuGet\Cache +build_script: +- SET GOPATH=c:\workspace +- go test -v -race ./... 
+test: off +clone_folder: c:\workspace\src\github.com\hpcloud\tail +branches: + only: + - master diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/Licence b/vendor/github.com/hpcloud/tail/ratelimiter/Licence new file mode 100644 index 000000000..434aab19f --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/Licence @@ -0,0 +1,7 @@ +Copyright (C) 2013 99designs + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go new file mode 100644 index 000000000..358b69e7f --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go @@ -0,0 +1,97 @@ +// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends. 
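For review context (an illustrative, standalone sketch -- not part of the vendored file): this ratelimiter package is what tail.go plugs in through Config.RateLimiter, pouring one unit per emitted line and backing off when the bucket overflows. A minimal driver of the exported API defined below, with made-up capacity and leak-interval values:

    package main

    import (
        "fmt"
        "time"

        "github.com/hpcloud/tail/ratelimiter"
    )

    func main() {
        // Capacity of 100 units; one unit leaks every 10ms, i.e. roughly
        // 100 sustained pours per second with bursts of up to 100.
        bucket := ratelimiter.NewLeakyBucket(100, 10*time.Millisecond)
        for i := 0; i < 300; i++ {
            if !bucket.Pour(1) {
                // Bucket full: the caller is expected to back off until
                // enough has leaked out.
                fmt.Println("rate limited; drains in", bucket.TimeToDrain())
                time.Sleep(bucket.TimeToDrain())
            }
        }
    }

tail's sendLine does essentially this, pouring one unit per line and entering a one-second cooloff when Pour returns false.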
+package ratelimiter + +import ( + "time" +) + +type LeakyBucket struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time + Now func() time.Time +} + +func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket { + bucket := LeakyBucket{ + Size: size, + Fill: 0, + LeakInterval: leakInterval, + Now: time.Now, + Lastupdate: time.Now(), + } + + return &bucket +} + +func (b *LeakyBucket) updateFill() { + now := b.Now() + if b.Fill > 0 { + elapsed := now.Sub(b.Lastupdate) + + b.Fill -= float64(elapsed) / float64(b.LeakInterval) + if b.Fill < 0 { + b.Fill = 0 + } + } + b.Lastupdate = now +} + +func (b *LeakyBucket) Pour(amount uint16) bool { + b.updateFill() + + var newfill float64 = b.Fill + float64(amount) + + if newfill > float64(b.Size) { + return false + } + + b.Fill = newfill + + return true +} + +// The time at which this bucket will be completely drained +func (b *LeakyBucket) DrainedAt() time.Time { + return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval))) +} + +// The duration until this bucket is completely drained +func (b *LeakyBucket) TimeToDrain() time.Duration { + return b.DrainedAt().Sub(b.Now()) +} + +func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration { + return b.Now().Sub(b.Lastupdate) +} + +type LeakyBucketSer struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time +} + +func (b *LeakyBucket) Serialise() *LeakyBucketSer { + bucket := LeakyBucketSer{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + } + + return &bucket +} + +func (b *LeakyBucketSer) DeSerialise() *LeakyBucket { + bucket := LeakyBucket{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + Now: time.Now, + } + + return &bucket +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go new file mode 100644 index 000000000..8f6a5784a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go @@ -0,0 +1,58 @@ +package ratelimiter + +import ( + "errors" + "time" +) + +const GC_SIZE int = 100 + +type Memory struct { + store map[string]LeakyBucket + lastGCCollected time.Time +} + +func NewMemory() *Memory { + m := new(Memory) + m.store = make(map[string]LeakyBucket) + m.lastGCCollected = time.Now() + return m +} + +func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) { + + bucket, ok := m.store[key] + if !ok { + return nil, errors.New("miss") + } + + return &bucket, nil +} + +func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error { + + if len(m.store) > GC_SIZE { + m.GarbageCollect() + } + + m.store[key] = bucket + + return nil +} + +func (m *Memory) GarbageCollect() { + now := time.Now() + + // rate limit GC to once per minute + if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() { + + for key, bucket := range m.store { + // if the bucket is drained, then GC + if bucket.DrainedAt().Unix() > now.Unix() { + delete(m.store, key) + } + } + + m.lastGCCollected = now + } +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go new file mode 100644 index 000000000..89b2fe882 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go @@ -0,0 +1,6 @@ +package ratelimiter + +type Storage interface { + GetBucketFor(string) (*LeakyBucket, error) + 
SetBucketFor(string, LeakyBucket) error
+}
diff --git a/vendor/github.com/hpcloud/tail/tail.go b/vendor/github.com/hpcloud/tail/tail.go
new file mode 100644
index 000000000..2d252d605
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/tail.go
@@ -0,0 +1,437 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package tail
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hpcloud/tail/ratelimiter"
+	"github.com/hpcloud/tail/util"
+	"github.com/hpcloud/tail/watch"
+	"gopkg.in/tomb.v1"
+)
+
+var (
+	ErrStop = fmt.Errorf("tail should now stop")
+)
+
+type Line struct {
+	Text string
+	Time time.Time
+	Err  error // Error from tail
+}
+
+// NewLine returns a Line with the present time.
+func NewLine(text string) *Line {
+	return &Line{text, time.Now(), nil}
+}
+
+// SeekInfo represents arguments to `os.Seek`
+type SeekInfo struct {
+	Offset int64
+	Whence int // os.SEEK_*
+}
+
+type logger interface {
+	Fatal(v ...interface{})
+	Fatalf(format string, v ...interface{})
+	Fatalln(v ...interface{})
+	Panic(v ...interface{})
+	Panicf(format string, v ...interface{})
+	Panicln(v ...interface{})
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+	Println(v ...interface{})
+}
+
+// Config is used to specify how a file must be tailed.
+type Config struct {
+	// File-specific
+	Location    *SeekInfo // Seek to this location before tailing
+	ReOpen      bool      // Reopen recreated files (tail -F)
+	MustExist   bool      // Fail early if the file does not exist
+	Poll        bool      // Poll for file changes instead of using inotify
+	Pipe        bool      // Is a named pipe (mkfifo)
+	RateLimiter *ratelimiter.LeakyBucket
+
+	// Generic IO
+	Follow      bool // Continue looking for new lines (tail -f)
+	MaxLineSize int  // If non-zero, split longer lines into multiple lines
+
+	// Logger, when nil, is set to tail.DefaultLogger
+	// To disable logging: set field to tail.DiscardingLogger
+	Logger logger
+}
+
+type Tail struct {
+	Filename string
+	Lines    chan *Line
+	Config
+
+	file   *os.File
+	reader *bufio.Reader
+
+	watcher watch.FileWatcher
+	changes *watch.FileChanges
+
+	tomb.Tomb // provides: Done, Kill, Dying
+
+	lk sync.Mutex
+}
+
+var (
+	// DefaultLogger is used when Config.Logger == nil
+	DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
+	// DiscardingLogger can be used to disable logging output
+	DiscardingLogger = log.New(ioutil.Discard, "", 0)
+)
+
+// TailFile begins tailing the file. Output stream is made available
+// via the `Tail.Lines` channel. To handle errors during tailing,
+// invoke the `Wait` or `Err` method after finishing reading from the
+// `Lines` channel.
+func TailFile(filename string, config Config) (*Tail, error) {
+	if config.ReOpen && !config.Follow {
+		util.Fatal("cannot set ReOpen without Follow.")
+	}
+
+	t := &Tail{
+		Filename: filename,
+		Lines:    make(chan *Line),
+		Config:   config,
+	}
+
+	// when Logger was not specified in config, use default logger
+	if t.Logger == nil {
+		t.Logger = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	if t.Poll {
+		t.watcher = watch.NewPollingFileWatcher(filename)
+	} else {
+		t.watcher = watch.NewInotifyFileWatcher(filename)
+	}
+
+	if t.MustExist {
+		var err error
+		t.file, err = OpenFile(t.Filename)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	go t.tailFileSync()
+
+	return t, nil
+}
+
+// Tell returns the file's current position, like stdio's ftell().
+// The value is only approximate, however:
+// the reader may already have pushed one line into the Lines channel,
+// so the reported position can be off by one line.
+func (tail *Tail) Tell() (offset int64, err error) {
+	if tail.file == nil {
+		return
+	}
+	offset, err = tail.file.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return
+	}
+
+	tail.lk.Lock()
+	defer tail.lk.Unlock()
+	if tail.reader == nil {
+		return
+	}
+
+	offset -= int64(tail.reader.Buffered())
+	return
+}
+
+// Stop stops the tailing activity.
+func (tail *Tail) Stop() error {
+	tail.Kill(nil)
+	return tail.Wait()
+}
+
+// StopAtEOF stops tailing as soon as the end of the file is reached.
+func (tail *Tail) StopAtEOF() error {
+	tail.Kill(errStopAtEOF)
+	return tail.Wait()
+}
+
+var errStopAtEOF = errors.New("tail: stop at eof")
+
+func (tail *Tail) close() {
+	close(tail.Lines)
+	tail.closeFile()
+}
+
+func (tail *Tail) closeFile() {
+	if tail.file != nil {
+		tail.file.Close()
+		tail.file = nil
+	}
+}
+
+func (tail *Tail) reopen() error {
+	tail.closeFile()
+	for {
+		var err error
+		tail.file, err = OpenFile(tail.Filename)
+		if err != nil {
+			if os.IsNotExist(err) {
+				tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
+				if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
+					if err == tomb.ErrDying {
+						return err
+					}
+					return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
+				}
+				continue
+			}
+			return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
+		}
+		break
+	}
+	return nil
+}
+
+func (tail *Tail) readLine() (string, error) {
+	tail.lk.Lock()
+	line, err := tail.reader.ReadString('\n')
+	tail.lk.Unlock()
+	if err != nil {
+		// Note ReadString "returns the data read before the error" in
+		// case of an error, including EOF, so we return it as is. The
+		// caller is expected to process it if err is EOF.
+		return line, err
+	}
+
+	line = strings.TrimRight(line, "\n")
+
+	return line, err
+}
+
+func (tail *Tail) tailFileSync() {
+	defer tail.Done()
+	defer tail.close()
+
+	if !tail.MustExist {
+		// deferred first open.
+		err := tail.reopen()
+		if err != nil {
+			if err != tomb.ErrDying {
+				tail.Kill(err)
+			}
+			return
+		}
+	}
+
+	// Seek to requested location on first open of the file.
+	if tail.Location != nil {
+		_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
+		tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
+		if err != nil {
+			tail.Killf("Seek error on %s: %s", tail.Filename, err)
+			return
+		}
+	}
+
+	tail.openReader()
+
+	var offset int64 = 0
+	var err error
+
+	// Read line by line.
+	for {
+		// do not seek in named pipes
+		if !tail.Pipe {
+			// grab the position in case we need to back up in the event of a half-line
+			offset, err = tail.Tell()
+			if err != nil {
+				tail.Kill(err)
+				return
+			}
+		}
+
+		line, err := tail.readLine()
+
+		// Process `line` even if err is EOF.
+		if err == nil {
+			cooloff := !tail.sendLine(line)
+			if cooloff {
+				// Wait a second before seeking till the end of
+				// file when rate limit is reached.
+				msg := "Too much log activity; waiting a second " +
+					"before resuming tailing"
+				tail.Lines <- &Line{msg, time.Now(), errors.New(msg)}
+				select {
+				case <-time.After(time.Second):
+				case <-tail.Dying():
+					return
+				}
+				if err := tail.seekEnd(); err != nil {
+					tail.Kill(err)
+					return
+				}
+			}
+		} else if err == io.EOF {
+			if !tail.Follow {
+				if line != "" {
+					tail.sendLine(line)
+				}
+				return
+			}
+
+			if tail.Follow && line != "" {
+				// this has the potential to never return the last line if
+				// it's not followed by a newline; seems a fair trade here
+				err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})
+				if err != nil {
+					tail.Kill(err)
+					return
+				}
+			}
+
+			// When EOF is reached, wait for more data to become
+			// available. Wait strategy is based on the `tail.watcher`
+			// implementation (inotify or polling).
+			err := tail.waitForChanges()
+			if err != nil {
+				if err != ErrStop {
+					tail.Kill(err)
+				}
+				return
+			}
+		} else {
+			// non-EOF error
+			tail.Killf("Error reading %s: %s", tail.Filename, err)
+			return
+		}
+
+		select {
+		case <-tail.Dying():
+			if tail.Err() == errStopAtEOF {
+				continue
+			}
+			return
+		default:
+		}
+	}
+}
+
+// waitForChanges waits until the file has been appended, deleted,
+// moved or truncated. When moved or deleted - the file will be
+// reopened if ReOpen is true. Truncated files are always reopened.
+func (tail *Tail) waitForChanges() error {
+	if tail.changes == nil {
+		pos, err := tail.file.Seek(0, os.SEEK_CUR)
+		if err != nil {
+			return err
+		}
+		tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
+		if err != nil {
+			return err
+		}
+	}
+
+	select {
+	case <-tail.changes.Modified:
+		return nil
+	case <-tail.changes.Deleted:
+		tail.changes = nil
+		if tail.ReOpen {
+			// XXX: we must not log from a library.
+			tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
+			if err := tail.reopen(); err != nil {
+				return err
+			}
+			tail.Logger.Printf("Successfully reopened %s", tail.Filename)
+			tail.openReader()
+			return nil
+		} else {
+			tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
+			return ErrStop
+		}
+	case <-tail.changes.Truncated:
+		// Always reopen truncated files (Follow is true)
+		tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
+		if err := tail.reopen(); err != nil {
+			return err
+		}
+		tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
+		tail.openReader()
+		return nil
+	case <-tail.Dying():
+		return ErrStop
+	}
+	panic("unreachable")
+}
+
+func (tail *Tail) openReader() {
+	if tail.MaxLineSize > 0 {
+		// add 2 to account for newline characters
+		tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
+	} else {
+		tail.reader = bufio.NewReader(tail.file)
+	}
+}
+
+func (tail *Tail) seekEnd() error {
+	return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END})
+}
+
+func (tail *Tail) seekTo(pos SeekInfo) error {
+	_, err := tail.file.Seek(pos.Offset, pos.Whence)
+	if err != nil {
+		return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
+	}
+	// Reset the read buffer whenever the file is re-seek'ed
+	tail.reader.Reset(tail.file)
+	return nil
+}
+
+// sendLine sends the line(s) to Lines channel, splitting longer lines
+// if necessary. Return false if rate limit is reached.
+func (tail *Tail) sendLine(line string) bool { + now := time.Now() + lines := []string{line} + + // Split longer lines + if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize { + lines = util.PartitionString(line, tail.MaxLineSize) + } + + for _, line := range lines { + tail.Lines <- &Line{line, now, nil} + } + + if tail.Config.RateLimiter != nil { + ok := tail.Config.RateLimiter.Pour(uint16(len(lines))) + if !ok { + tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n", + tail.Filename) + return false + } + } + + return true +} + +// Cleanup removes inotify watches added by the tail package. This function is +// meant to be invoked from a process's exit handler. Linux kernel may not +// automatically remove inotify watches after the process exits. +func (tail *Tail) Cleanup() { + watch.Cleanup(tail.Filename) +} diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/hpcloud/tail/tail_posix.go new file mode 100644 index 000000000..bc4dc3357 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail_posix.go @@ -0,0 +1,11 @@ +// +build linux darwin freebsd netbsd openbsd + +package tail + +import ( + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return os.Open(name) +} diff --git a/vendor/github.com/hpcloud/tail/tail_windows.go b/vendor/github.com/hpcloud/tail/tail_windows.go new file mode 100644 index 000000000..ef2cfca1b --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package tail + +import ( + "github.com/hpcloud/tail/winfile" + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return winfile.OpenFile(name, os.O_RDONLY, 0) +} diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/hpcloud/tail/util/util.go new file mode 100644 index 000000000..54151fe39 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/util/util.go @@ -0,0 +1,48 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package util + +import ( + "fmt" + "log" + "os" + "runtime/debug" +) + +type Logger struct { + *log.Logger +} + +var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)} + +// fatal is like panic except it displays only the current goroutine's stack. +func Fatal(format string, v ...interface{}) { + // https://github.com/hpcloud/log/blob/master/log.go#L45 + LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack())) + os.Exit(1) +} + +// partitionString partitions the string into chunks of given size, +// with the last chunk of variable size. 
+func PartitionString(s string, chunkSize int) []string { + if chunkSize <= 0 { + panic("invalid chunkSize") + } + length := len(s) + chunks := 1 + length/chunkSize + start := 0 + end := chunkSize + parts := make([]string, 0, chunks) + for { + if end > length { + end = length + } + parts = append(parts, s[start:end]) + if end == length { + break + } + start, end = end, end+chunkSize + } + return parts +} diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/hpcloud/tail/watch/filechanges.go new file mode 100644 index 000000000..3ce5dcecb --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/filechanges.go @@ -0,0 +1,36 @@ +package watch + +type FileChanges struct { + Modified chan bool // Channel to get notified of modifications + Truncated chan bool // Channel to get notified of truncations + Deleted chan bool // Channel to get notified of deletions/renames +} + +func NewFileChanges() *FileChanges { + return &FileChanges{ + make(chan bool), make(chan bool), make(chan bool)} +} + +func (fc *FileChanges) NotifyModified() { + sendOnlyIfEmpty(fc.Modified) +} + +func (fc *FileChanges) NotifyTruncated() { + sendOnlyIfEmpty(fc.Truncated) +} + +func (fc *FileChanges) NotifyDeleted() { + sendOnlyIfEmpty(fc.Deleted) +} + +// sendOnlyIfEmpty sends on a bool channel only if the channel has no +// backlog to be read by other goroutines. This concurrency pattern +// can be used to notify other goroutines if and only if they are +// looking for it (i.e., subsequent notifications can be compressed +// into one). +func sendOnlyIfEmpty(ch chan bool) { + select { + case ch <- true: + default: + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/hpcloud/tail/watch/inotify.go new file mode 100644 index 000000000..4478f1e1a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify.go @@ -0,0 +1,128 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify.v1" + "gopkg.in/tomb.v1" +) + +// InotifyFileWatcher uses inotify to monitor file changes. +type InotifyFileWatcher struct { + Filename string + Size int64 +} + +func NewInotifyFileWatcher(filename string) *InotifyFileWatcher { + fw := &InotifyFileWatcher{filepath.Clean(filename), 0} + return fw +} + +func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + err := WatchCreate(fw.Filename) + if err != nil { + return err + } + defer RemoveWatchCreate(fw.Filename) + + // Do a real check now as the file might have been created before + // calling `WatchFlags` above. + if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) { + // file exists, or stat returned an error. 
+ return err + } + + events := Events(fw.Filename) + + for { + select { + case evt, ok := <-events: + if !ok { + return fmt.Errorf("inotify watcher has been closed") + } + evtName, err := filepath.Abs(evt.Name) + if err != nil { + return err + } + fwFilename, err := filepath.Abs(fw.Filename) + if err != nil { + return err + } + if evtName == fwFilename { + return nil + } + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + err := Watch(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + fw.Size = pos + + go func() { + defer RemoveWatch(fw.Filename) + + events := Events(fw.Filename) + + for { + prevSize := fw.Size + + var evt fsnotify.Event + var ok bool + + select { + case evt, ok = <-events: + if !ok { + return + } + case <-t.Dying(): + return + } + + switch { + case evt.Op&fsnotify.Remove == fsnotify.Remove: + fallthrough + + case evt.Op&fsnotify.Rename == fsnotify.Rename: + changes.NotifyDeleted() + return + + case evt.Op&fsnotify.Write == fsnotify.Write: + fi, err := os.Stat(fw.Filename) + if err != nil { + if os.IsNotExist(err) { + changes.NotifyDeleted() + return + } + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + fw.Size = fi.Size() + + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + } else { + changes.NotifyModified() + } + prevSize = fw.Size + } + } + }() + + return changes, nil +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go new file mode 100644 index 000000000..03be4275c --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go @@ -0,0 +1,260 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
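+//
+// This file multiplexes a single shared fsnotify.Watcher across every tailed
+// file, reference-counting the underlying inotify watches per filename.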
+
+package watch
+
+import (
+	"log"
+	"os"
+	"path/filepath"
+	"sync"
+	"syscall"
+
+	"github.com/hpcloud/tail/util"
+
+	"gopkg.in/fsnotify.v1"
+)
+
+type InotifyTracker struct {
+	mux       sync.Mutex
+	watcher   *fsnotify.Watcher
+	chans     map[string]chan fsnotify.Event
+	done      map[string]chan bool
+	watchNums map[string]int
+	watch     chan *watchInfo
+	remove    chan *watchInfo
+	error     chan error
+}
+
+type watchInfo struct {
+	op    fsnotify.Op
+	fname string
+}
+
+func (this *watchInfo) isCreate() bool {
+	return this.op == fsnotify.Create
+}
+
+var (
+	// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
+	shared *InotifyTracker
+
+	// these are used to ensure the shared InotifyTracker is run exactly once
+	once  = sync.Once{}
+	goRun = func() {
+		shared = &InotifyTracker{
+			mux:       sync.Mutex{},
+			chans:     make(map[string]chan fsnotify.Event),
+			done:      make(map[string]chan bool),
+			watchNums: make(map[string]int),
+			watch:     make(chan *watchInfo),
+			remove:    make(chan *watchInfo),
+			error:     make(chan error),
+		}
+		go shared.run()
+	}
+
+	logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+// Watch signals the run goroutine to begin watching the input filename
+func Watch(fname string) error {
+	return watch(&watchInfo{
+		fname: fname,
+	})
+}
+
+// WatchCreate signals the run goroutine to begin watching for the creation of
+// the input filename. If you call WatchCreate, undo it with RemoveWatchCreate
+// rather than Cleanup.
+func WatchCreate(fname string) error {
+	return watch(&watchInfo{
+		op:    fsnotify.Create,
+		fname: fname,
+	})
+}
+
+func watch(winfo *watchInfo) error {
+	// start running the shared InotifyTracker if not already running
+	once.Do(goRun)
+
+	winfo.fname = filepath.Clean(winfo.fname)
+	shared.watch <- winfo
+	return <-shared.error
+}
+
+// RemoveWatch signals the run goroutine to remove the watch for the input filename
+func RemoveWatch(fname string) {
+	remove(&watchInfo{
+		fname: fname,
+	})
+}
+
+// RemoveWatchCreate signals the run goroutine to remove the creation watch for
+// the input filename
+func RemoveWatchCreate(fname string) {
+	remove(&watchInfo{
+		op:    fsnotify.Create,
+		fname: fname,
+	})
+}
+
+func remove(winfo *watchInfo) {
+	// start running the shared InotifyTracker if not already running
+	once.Do(goRun)
+
+	winfo.fname = filepath.Clean(winfo.fname)
+	shared.mux.Lock()
+	done := shared.done[winfo.fname]
+	if done != nil {
+		delete(shared.done, winfo.fname)
+		close(done)
+	}
+
+	fname := winfo.fname
+	if winfo.isCreate() {
+		// Watch for new files to be created in the parent directory.
+		fname = filepath.Dir(fname)
+	}
+	shared.watchNums[fname]--
+	watchNum := shared.watchNums[fname]
+	if watchNum == 0 {
+		delete(shared.watchNums, fname)
+	}
+	shared.mux.Unlock()
+
+	// If we were the last ones to watch this file, unsubscribe from inotify.
+	// This needs to happen after releasing the lock because fsnotify waits
+	// synchronously for the kernel to acknowledge the removal of the watch
+	// for this file, which causes us to deadlock if we still held the lock.
+	if watchNum == 0 {
+		shared.watcher.Remove(fname)
+	}
+	shared.remove <- winfo
+}
+
+// Events returns a channel to which fsnotify.Events corresponding to the input filename
+// will be sent. This channel will be closed when removeWatch is called on this
+// filename.
+func Events(fname string) <-chan fsnotify.Event {
+	shared.mux.Lock()
+	defer shared.mux.Unlock()
+
+	return shared.chans[fname]
+}
+
+// Cleanup removes the watch for the input filename if necessary.
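+// It is a thin alias for RemoveWatch.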
+func Cleanup(fname string) {
+	RemoveWatch(fname)
+}
+
+// addWatch adds an inotify watch for the input filename if one is not already
+// present, creating the shared event channel for it on first use.
+func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
+	shared.mux.Lock()
+	defer shared.mux.Unlock()
+
+	if shared.chans[winfo.fname] == nil {
+		shared.chans[winfo.fname] = make(chan fsnotify.Event)
+		shared.done[winfo.fname] = make(chan bool)
+	}
+
+	fname := winfo.fname
+	if winfo.isCreate() {
+		// Watch for new files to be created in the parent directory.
+		fname = filepath.Dir(fname)
+	}
+
+	// already in inotify watch
+	if shared.watchNums[fname] > 0 {
+		shared.watchNums[fname]++
+		if winfo.isCreate() {
+			shared.watchNums[winfo.fname]++
+		}
+		return nil
+	}
+
+	err := shared.watcher.Add(fname)
+	if err == nil {
+		shared.watchNums[fname]++
+		if winfo.isCreate() {
+			shared.watchNums[winfo.fname]++
+		}
+	}
+	return err
+}
+
+// removeWatch closes the events channel for the input filename and drops the
+// tracker's bookkeeping for it.
+func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
+	shared.mux.Lock()
+	defer shared.mux.Unlock()
+
+	ch := shared.chans[winfo.fname]
+	if ch == nil {
+		return
+	}
+
+	delete(shared.chans, winfo.fname)
+	close(ch)
+
+	if !winfo.isCreate() {
+		return
+	}
+
+	shared.watchNums[winfo.fname]--
+	if shared.watchNums[winfo.fname] == 0 {
+		delete(shared.watchNums, winfo.fname)
+	}
+}
+
+// sendEvent sends the input event to the appropriate Tail.
+func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
+	name := filepath.Clean(event.Name)
+
+	shared.mux.Lock()
+	ch := shared.chans[name]
+	done := shared.done[name]
+	shared.mux.Unlock()
+
+	if ch != nil && done != nil {
+		select {
+		case ch <- event:
+		case <-done:
+		}
+	}
+}
+
+// run starts the goroutine in which the shared struct reads events from its
+// Watcher's Event channel and sends the events to the appropriate Tail.
+func (shared *InotifyTracker) run() {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		util.Fatal("failed to create Watcher")
+	}
+	shared.watcher = watcher
+
+	for {
+		select {
+		case winfo := <-shared.watch:
+			shared.error <- shared.addWatch(winfo)
+
+		case winfo := <-shared.remove:
+			shared.removeWatch(winfo)
+
+		case event, open := <-shared.watcher.Events:
+			if !open {
+				return
+			}
+			shared.sendEvent(event)
+
+		case err, open := <-shared.watcher.Errors:
+			if !open {
+				return
+			} else if err != nil {
+				sysErr, ok := err.(*os.SyscallError)
+				if !ok || sysErr.Err != syscall.EINTR {
+					logger.Printf("Error in Watcher Error channel: %s", err)
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/hpcloud/tail/watch/polling.go
new file mode 100644
index 000000000..49491f21d
--- /dev/null
+++ b/vendor/github.com/hpcloud/tail/watch/polling.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package watch
+
+import (
+	"os"
+	"runtime"
+	"time"
+
+	"github.com/hpcloud/tail/util"
+	"gopkg.in/tomb.v1"
+)
+
+// PollingFileWatcher polls the file for changes.
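+// It is the fallback FileWatcher for platforms without inotify support, or
+// for callers that explicitly request polling.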
+type PollingFileWatcher struct { + Filename string + Size int64 +} + +func NewPollingFileWatcher(filename string) *PollingFileWatcher { + fw := &PollingFileWatcher{filename, 0} + return fw +} + +var POLL_DURATION time.Duration + +func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + for { + if _, err := os.Stat(fw.Filename); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + select { + case <-time.After(POLL_DURATION): + continue + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + origFi, err := os.Stat(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + var prevModTime time.Time + + // XXX: use tomb.Tomb to cleanly manage these goroutines. replace + // the fatal (below) with tomb's Kill. + + fw.Size = pos + + go func() { + prevSize := fw.Size + for { + select { + case <-t.Dying(): + return + default: + } + + time.Sleep(POLL_DURATION) + fi, err := os.Stat(fw.Filename) + if err != nil { + // Windows cannot delete a file if a handle is still open (tail keeps one open) + // so it gives access denied to anything trying to read it until all handles are released. + if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) { + // File does not exist (has been deleted). + changes.NotifyDeleted() + return + } + + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + + // File got moved/renamed? + if !os.SameFile(origFi, fi) { + changes.NotifyDeleted() + return + } + + // File got truncated? + fw.Size = fi.Size() + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + prevSize = fw.Size + continue + } + // File got bigger? + if prevSize > 0 && prevSize < fw.Size { + changes.NotifyModified() + prevSize = fw.Size + continue + } + prevSize = fw.Size + + // File was appended to (changed)? + modTime := fi.ModTime() + if modTime != prevModTime { + prevModTime = modTime + changes.NotifyModified() + } + } + }() + + return changes, nil +} + +func init() { + POLL_DURATION = 250 * time.Millisecond +} diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/hpcloud/tail/watch/watch.go new file mode 100644 index 000000000..2e1783ef0 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/watch.go @@ -0,0 +1,20 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import "gopkg.in/tomb.v1" + +// FileWatcher monitors file-level events. +type FileWatcher interface { + // BlockUntilExists blocks until the file comes into existence. + BlockUntilExists(*tomb.Tomb) error + + // ChangeEvents reports on changes to a file, be it modification, + // deletion, renames or truncations. Returned FileChanges group of + // channels will be closed, thus become unusable, after a deletion + // or truncation event. + // In order to properly report truncations, ChangeEvents requires + // the caller to pass their current offset in the file. 
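+	// The offset only seeds truncation detection; implementations do not
+	// seek the underlying file.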
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error) +} diff --git a/vendor/github.com/hpcloud/tail/winfile/winfile.go b/vendor/github.com/hpcloud/tail/winfile/winfile.go new file mode 100644 index 000000000..aa7e7bc5d --- /dev/null +++ b/vendor/github.com/hpcloud/tail/winfile/winfile.go @@ -0,0 +1,92 @@ +// +build windows + +package winfile + +import ( + "os" + "syscall" + "unsafe" +) + +// issue also described here +//https://codereview.appspot.com/8203043/ + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218 +func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211 +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133 +func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) { + r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm)) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61 +func syscallMode(i os.FileMode) (o uint32) { + o |= uint32(i.Perm()) + if i&os.ModeSetuid != 0 { + o |= syscall.S_ISUID + } + if i&os.ModeSetgid != 0 { + o |= syscall.S_ISGID + } + if i&os.ModeSticky != 0 { + o |= syscall.S_ISVTX + } + // No mapping for Go's ModeTemporary (plan9 only). 
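+	// At this point o carries the permission bits plus any
+	// setuid/setgid/sticky flags derived above.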
+	return
+}
diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/.gitignore
new file mode 100644
index 000000000..b9f9659d2
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/.gitignore
@@ -0,0 +1,7 @@
+.DS_Store
+TODO
+tmp/**/*
+*.coverprofile
+.vscode
+.idea/
+*.log
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
new file mode 100644
index 000000000..72e8ccf0b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+  - 1.10.x
+  - 1.11.x
+  - 1.12.x
+  - tip
+
+install:
+  - go get -v -t ./...
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/onsi/gomega
+  - go install github.com/onsi/ginkgo/ginkgo
+  - export PATH=$PATH:$HOME/gopath/bin
+
+script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
new file mode 100644
index 000000000..4920406ae
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -0,0 +1,218 @@
+## 1.8.0
+
+### New Features
+- allow config of the vet flag for `go test` (#562) [3cd45fa]
+- Support projects using go modules [d56ee76]
+
+### Fixes and Minor Improvements
+- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
+- Optimize focus to avoid allocations [f493786]
+- Ensure generated test file names are underscored [505cc35]
+
+## 1.7.0
+
+### New Features
+- Add JustAfterEach (#484) [0d4f080]
+
+### Fixes
+- Correctly round suite time in junit reporter [2445fc1]
+- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
+
+## 1.6.0
+
+### New Features
+- add --debug flag to emit node output to files (#499) [39febac]
+
+### Fixes
+- fix: for `go vet` to pass [69338ec]
+- docs: fix for contributing instructions [7004cb1]
+- consolidate and streamline contribution docs (#494) [d848015]
+- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
+- all: gofmt [000d317]
+- Increase eventually timeout to 30s [c73579c]
+- Clarify asynchronous test behaviour [294d8f4]
+- Travis badge should only show master [26d2143]
+
+## 1.5.0 5/10/2018
+
+### New Features
+- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
+- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
+- Re-add noisySkippings flag [652e15c]
+- Allow coverage to be displayed for focused specs (#367) [11459a8]
+- Handle -outputdir flag (#364) [228e3a8]
+- Handle -coverprofile flag (#355) [43392d5]
+
+### Fixes
+- When using custom reporters, register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their custom reporters. (#365) [8382b23]
+- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist.
This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
+- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
+- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
+- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
+- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
+- Add an extra new line after reporting spec run completion for test2json [874520d]
+- Added a name field to the junit-reported testsuite [ae61c63]
+- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
+- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
+- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
+- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
+- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
+- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
+- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
+- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
+- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
+- Replace GOPATH in Environment [4b883f0]
+
+
+## 1.4.0 7/16/2017
+
+- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
+- thanks to improvements in `go test -c`, `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
+- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
+- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
+- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
+- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
+
+## 1.3.0 3/28/2017
+
+Improvements:
+
+- Significantly improved parallel test distribution. Now, instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance), Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
+- `Skip(message)` can be used to skip the current test.
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
+- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
+- Support for retrying flaky tests with `--flakeAttempts`
+- `ginkgo ./...` now recurses as you'd expect
+- Added `Specify`, a synonym for `It`
+- Support colorise on Windows
+- Broader support for various go compilation flags in the `ginkgo` CLI
+
+Bug Fixes:
+
+- Ginkgo tests now fail when you `panic(nil)` (#167)
+
+## 1.2.0 5/31/2015
+
+Improvements
+
+- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
+- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
+- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
+
+## 1.2.0-beta
+
+Ginkgo now requires Go 1.4+
+
+Improvements:
+
+- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
+- Improved focus behavior. Now, this:
+
+  ```golang
+  FDescribe("Some describe", func() {
+      It("A", func() {})
+
+      FIt("B", func() {})
+  })
+  ```
+
+  will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
+- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
+- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
+- Improved output when an error occurs in a setup or teardown block.
+- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
+- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
+- Add support for precompiled tests:
+  - `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
+  - The compiled `package.test` file can be run directly. This runs the tests in series.
+  - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
+- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
+- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
+- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
+- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
+- `ginkgo -notify` now works on Linux
+
+Bug Fixes:
+
+- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
+- Fix tempfile leak when running in parallel
+- Fix incorrect failure message when a panic occurs during a parallel test run
+- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
+- Be more consistent about handling SIGTERM as well as SIGINT
+- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long-standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
+
+## 1.1.0 (8/2/2014)
+
+No changes, just dropping the beta.
+
+## 1.1.0-beta (7/22/2014)
+New Features:
+
+- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
+- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
+- `ginkgo -p` runs the test suite in parallel with an auto-detected number of nodes.
+- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
+- `ginkgo --failFast` aborts the test suite after the first failure.
+- `ginkgo generate file_1 file_2` can take multiple file arguments.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- `ginkgo --randomizeSuites` will run test *suites* in random order using the generated/passed-in seed.
+
+Improvements:
+
+- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
+- `ginkgo --untilItFails` no longer recompiles between attempts.
+- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
+
+Bug Fixes:
+
+- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
+- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
+
+## 1.0.0 (5/24/2014)
+New Features:
+
+- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
+
+Improvements:
+
+- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
+- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
+
+Bug Fixes:
+
+- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
+- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
+- Fix all remaining race conditions in Ginkgo's test suite.
+
+## 1.0.0-beta (4/14/2014)
+Breaking changes:
+
+- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
+- Modified the Reporter interface
+- `watch` is now a subcommand, not a flag.
+
+DSL changes:
+
+- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
+- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
+- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
+
+CLI changes:
+
+- `watch` is now a subcommand, not a flag
+- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
+- Additional arguments can be passed to specs. Pass them after the `--` separator
+- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
+- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Ginkgo's internal to `internal`
+- Rename `example` everywhere to `spec`
+- Much more!
diff --git a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
new file mode 100644
index 000000000..908b95c2c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
@@ -0,0 +1,33 @@
+# Contributing to Ginkgo
+
+Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+  - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
+  - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
+- Update the documentation. Ginkgo uses `godoc` comments and documentation on the `gh-pages` branch.
+  If relevant, please submit a docs PR to that branch alongside your code PR.
+
+Thanks for supporting Ginkgo!
+
+## Setup
+
+Fork the repo, then:

+```
+go get github.com/onsi/ginkgo
+go get github.com/onsi/gomega/...
+cd $GOPATH/src/github.com/onsi/ginkgo
+git remote add fork git@github.com:<your-username>/ginkgo.git
+
+ginkgo -r -p # ensure tests are green
+go vet ./... # ensure linter is happy
+```
+
+## Making the PR
+  - go to a new branch `git checkout -b my-feature`
+  - make your changes
+  - run tests and linter again (see above)
+  - `git push fork`
+  - open PR 🎉
diff --git a/vendor/github.com/onsi/ginkgo/LICENSE b/vendor/github.com/onsi/ginkgo/LICENSE
new file mode 100644
index 000000000..9415ee72c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
new file mode 100644
index 000000000..cdf8d054a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -0,0 +1,121 @@
+![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
+
+[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
+
+Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
+
+If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
+
+## Feature List
+
+- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
+
+- Structure your BDD-style tests expressively:
+  - Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
+  - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
+  - [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
+  - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
+  - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
+
+- A comprehensive test runner that lets you:
+  - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
+  - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
+  - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
+  - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
+
+- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
+  - `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in realtime
+  - `ginkgo -cover` runs your tests using Go's code coverage tool
+  - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
+  - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
+  - `ginkgo -r` runs all test suites under the current directory
+  - `ginkgo -v` prints out identifying information for each test just before it runs
+
+  And much more: run `ginkgo help` for details!
+
+  The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
+
+- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
+
+- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
+
+- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
+
+- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
+
+- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.
+
+- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
+
+- A modular architecture that lets you easily:
+  - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
+  - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
+
+## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
+
+Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
+
+## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework
+
+Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
+
+## Set Me Up!
+
+You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
+
+```bash
+
+go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
+go get -u github.com/onsi/gomega/...    # fetches the matcher library
+
+cd path/to/package/you/want/to/test
+
+ginkgo bootstrap # set up a new ginkgo suite
+ginkgo generate  # will create a sample test file. edit this file and add your tests then...
+
+go test # to run your tests
+
+ginkgo  # also runs your tests
+
+```
+
+## I'm new to Go: What are my testing options?
+
+Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
+
+With that said, it's great to know what your options are :)
+
+### What Go gives you out of the box
+
+Testing is a first-class citizen in Go; however, Go's built-in testing primitives are somewhat limited: the [testing](http://golang.org/pkg/testing) package provides basic XUnit-style tests and no assertion library.
+
+### Matcher libraries for Go's XUnit-style tests
+
+A number of matcher libraries have been written to augment Go's built-in XUnit-style tests. Here are two that have gained traction:
+
+- [testify](https://github.com/stretchr/testify)
+- [gocheck](http://labix.org/gocheck)
+
+You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit-style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
+
+### BDD-style testing frameworks
+
+There are a handful of BDD-style testing frameworks written for Go. Here are a few:
+
+- [Ginkgo](https://github.com/onsi/ginkgo) ;)
+- [GoConvey](https://github.com/smartystreets/goconvey)
+- [Goblin](https://github.com/franela/goblin)
+- [Mao](https://github.com/azer/mao)
+- [Zen](https://github.com/pranavraja/zen)
+
+Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.
+
+Go explore!
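+
+## A Quick Taste
+
+For a quick taste, here is a minimal, contrived suite (the `shelf` names are purely illustrative): `RunSpecs` hooks Ginkgo into `go test`, and the `Describe`/`It` blocks hold the specs.
+
+```go
+package shelf_test
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+)
+
+func TestShelf(t *testing.T) {
+	RunSpecs(t, "Shelf Suite") // entry point: runs every spec registered below
+}
+
+var _ = Describe("Shelf", func() {
+	var shelf []string
+
+	BeforeEach(func() {
+		shelf = []string{"Les Miserables"} // fresh fixture for every spec
+	})
+
+	It("holds the books we put on it", func() {
+		if len(shelf) != 1 {
+			Fail("expected exactly one book on the shelf")
+		}
+	})
+})
+```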
+ +## License + +Ginkgo is MIT-Licensed + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/vendor/github.com/onsi/ginkgo/RELEASING.md b/vendor/github.com/onsi/ginkgo/RELEASING.md new file mode 100644 index 000000000..1e298c2da --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/RELEASING.md @@ -0,0 +1,14 @@ +A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: + +1. Ensure CHANGELOG.md is up to date. + - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Categorize the changes into + - Breaking Changes (requires a major version) + - New Features (minor version) + - Fixes (fix version) + - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) +1. Update `VERSION` in `config/config.go` +1. Create a commit with the version number as the commit message (e.g. `v1.3.0`) +1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`) +1. Push the commit and tag to GitHub +1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes. diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go new file mode 100644 index 000000000..dab2a2470 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -0,0 +1,200 @@ +/* +Ginkgo accepts a number of configuration options. + +These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli) + +You can also learn more via + + ginkgo help + +or (I kid you not): + + go test -asdf +*/ +package config + +import ( + "flag" + "time" + + "fmt" +) + +const VERSION = "1.8.0" + +type GinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusString string + SkipString string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +var GinkgoConfig = GinkgoConfigType{} + +type DefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool +} + +var DefaultReporterConfig = DefaultReporterConfigType{} + +func processPrefix(prefix string) string { + if prefix != "" { + prefix = prefix + "." + } + return prefix +} + +func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { + prefix = processPrefix(prefix) + flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") + flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. 
By default, ginkgo only randomizes the top level Describe, Context and When groups.") + flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.") + flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.") + flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.") + + flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.") + + flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.") + flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.") + + flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).") + + flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.") + + flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.") + + flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.") + + if includeParallelFlags { + flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.") + flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. 
For running specs in parallel.")
+		flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
+		flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
+	}
+
+	flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
+	flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
+	flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
+	flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
+	flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
+	flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
+	flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
+}
+
+func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
+	prefix = processPrefix(prefix)
+	result := make([]string, 0)
+
+	if ginkgo.RandomSeed > 0 {
+		result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
+	}
+
+	if ginkgo.RandomizeAllSpecs {
+		result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
+	}
+
+	if ginkgo.SkipMeasurements {
+		result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
+	}
+
+	if ginkgo.FailOnPending {
+		result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
+	}
+
+	if ginkgo.FailFast {
+		result = append(result, fmt.Sprintf("--%sfailFast", prefix))
+	}
+
+	if ginkgo.DryRun {
+		result = append(result, fmt.Sprintf("--%sdryRun", prefix))
+	}
+
+	if ginkgo.FocusString != "" {
+		result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
+	}
+
+	if ginkgo.SkipString != "" {
+		result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
+	}
+
+	if ginkgo.FlakeAttempts > 1 {
+		result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
+	}
+
+	if ginkgo.EmitSpecProgress {
+		result = append(result, fmt.Sprintf("--%sprogress", prefix))
+	}
+
+	if ginkgo.DebugParallel {
+		result = append(result, fmt.Sprintf("--%sdebug", prefix))
+	}
+
+	if ginkgo.ParallelNode != 0 {
+		result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
+	}
+
+	if ginkgo.ParallelTotal != 0 {
+		result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
+	}
+
+	if ginkgo.StreamHost != "" {
+		result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
+	}
+
+	if ginkgo.SyncHost != "" {
+		result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
+	}
+
+	if ginkgo.RegexScansFilePath {
+		result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
+	}
+
+	if reporter.NoColor {
+		result = append(result, fmt.Sprintf("--%snoColor", prefix))
+	}
+
+	if reporter.SlowSpecThreshold > 0 {
+		result =
append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold)) + } + + if !reporter.NoisyPendings { + result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix)) + } + + if !reporter.NoisySkippings { + result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix)) + } + + if reporter.Verbose { + result = append(result, fmt.Sprintf("--%sv", prefix)) + } + + if reporter.Succinct { + result = append(result, fmt.Sprintf("--%ssuccinct", prefix)) + } + + if reporter.FullTrace { + result = append(result, fmt.Sprintf("--%strace", prefix)) + } + + return result +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go new file mode 100644 index 000000000..a6b96d88f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -0,0 +1,619 @@ +/* +Ginkgo is a BDD-style testing framework for Golang + +The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/ + +Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega) + +Ginkgo on Github: http://github.com/onsi/ginkgo + +Ginkgo is MIT-Licensed +*/ +package ginkgo + +import ( + "flag" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/remote" + "github.com/onsi/ginkgo/internal/suite" + "github.com/onsi/ginkgo/internal/testingtproxy" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/reporters/stenographer" + colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" + "github.com/onsi/ginkgo/types" +) + +const GINKGO_VERSION = config.VERSION +const GINKGO_PANIC = ` +Your test failed. +Ginkgo panics to prevent subsequent assertions from running. +Normally Ginkgo rescues this panic so you shouldn't see it. + +But, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. +` +const defaultTimeout = 1 + +var globalSuite *suite.Suite +var globalFailer *failer.Failer + +func init() { + config.Flags(flag.CommandLine, "ginkgo", true) + GinkgoWriter = writer.New(os.Stdout) + globalFailer = failer.New() + globalSuite = suite.New(globalFailer) +} + +//GinkgoWriter implements an io.Writer +//When running in verbose mode any writes to GinkgoWriter will be immediately printed +//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen +//only if the current test fails. +var GinkgoWriter io.Writer + +//The interface by which Ginkgo receives *testing.T +type GinkgoTestingT interface { + Fail() +} + +//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is +//useful for seeding your own pseudorandom number generators (PRNGs) to ensure +//consistent executions from run to run, where your tests contain variability (for +//example, when selecting random test data). 
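+//
+//For example:
+//	rng := rand.New(rand.NewSource(GinkgoRandomSeed()))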
+func GinkgoRandomSeed() int64 {
+	return config.GinkgoConfig.RandomSeed
+}
+
+//GinkgoParallelNode returns the parallel node number for the current ginkgo process
+//The node number is 1-indexed
+func GinkgoParallelNode() int {
+	return config.GinkgoConfig.ParallelNode
+}
+
+//Some matcher libraries or legacy codebases require a *testing.T
+//GinkgoT implements an interface analogous to *testing.T and can be used if
+//the library in question accepts *testing.T through an interface
+//
+// For example, with testify:
+// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
+//
+// Or with gomock:
+// gomock.NewController(GinkgoT())
+//
+// GinkgoT() takes an optional offset argument that can be used to get the
+// correct line number associated with the failure.
+func GinkgoT(optionalOffset ...int) GinkgoTInterface {
+	offset := 3
+	if len(optionalOffset) > 0 {
+		offset = optionalOffset[0]
+	}
+	return testingtproxy.New(GinkgoWriter, Fail, offset)
+}
+
+//The interface returned by GinkgoT(). This covers most of the methods
+//in the testing package's T.
+type GinkgoTInterface interface {
+	Fail()
+	Error(args ...interface{})
+	Errorf(format string, args ...interface{})
+	FailNow()
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Log(args ...interface{})
+	Logf(format string, args ...interface{})
+	Failed() bool
+	Parallel()
+	Skip(args ...interface{})
+	Skipf(format string, args ...interface{})
+	SkipNow()
+	Skipped() bool
+}
+
+//Custom Ginkgo test reporters must implement the Reporter interface.
+//
+//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
+//and a SpecSummary just before a spec begins and just after a spec ends
+type Reporter reporters.Reporter
+
+//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
+//to tell Ginkgo that your async test is done.
+type Done chan<- interface{}
+
+//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
+//	FullTestText: a concatenation of ComponentTexts and the TestText
+//	ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
+//	TestText: the text in the actual It or Measure node
+//	IsMeasurement: true if the current test is a measurement
+//	FileName: the name of the file containing the current test
+//	LineNumber: the line number for the current test
+//	Failed: if the current test has failed, this will be true (useful in an AfterEach)
+type GinkgoTestDescription struct {
+	FullTestText   string
+	ComponentTexts []string
+	TestText       string
+
+	IsMeasurement bool
+
+	FileName   string
+	LineNumber int
+
+	Failed   bool
+	Duration time.Duration
+}
+
+//CurrentGinkgoTestDescription returns information about the current running test.
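+//
+//For example, in an AfterEach:
+//	if CurrentGinkgoTestDescription().Failed {
+//		// e.g. dump extra debug output for the failed spec
+//	}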
+func CurrentGinkgoTestDescription() GinkgoTestDescription { + summary, ok := globalSuite.CurrentRunningSpecSummary() + if !ok { + return GinkgoTestDescription{} + } + + subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1] + + return GinkgoTestDescription{ + ComponentTexts: summary.ComponentTexts[1:], + FullTestText: strings.Join(summary.ComponentTexts[1:], " "), + TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1], + IsMeasurement: summary.IsMeasurement, + FileName: subjectCodeLocation.FileName, + LineNumber: subjectCodeLocation.LineNumber, + Failed: summary.HasFailureState(), + Duration: summary.RunTime, + } +} + +//Measurement tests receive a Benchmarker. +// +//You use the Time() function to time how long the passed in body function takes to run +//You use the RecordValue() function to track arbitrary numerical measurements. +//The RecordValueWithPrecision() function can be used alternatively to provide the unit +//and resolution of the numeric measurement. +//The optional info argument is passed to the test reporter and can be used to +// provide the measurement data to a custom reporter with context. +// +//See http://onsi.github.io/ginkgo/#benchmark_tests for more details +type Benchmarker interface { + Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) + RecordValue(name string, value float64, info ...interface{}) + RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) +} + +//RunSpecs is the entry point for the Ginkgo test runner. +//You must call this within a Golang testing TestX(t *testing.T) function. +// +//To bootstrap a test suite you can use the Ginkgo CLI: +// +// ginkgo bootstrap +func RunSpecs(t GinkgoTestingT, description string) bool { + specReporters := []Reporter{buildDefaultReporter()} + return RunSpecsWithCustomReporters(t, description, specReporters) +} + +//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace +//RunSpecs() with this method. +func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { + specReporters = append(specReporters, buildDefaultReporter()) + return RunSpecsWithCustomReporters(t, description, specReporters) +} + +//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace +//RunSpecs() with this method. 
Note that parallel tests will not work correctly without the default reporter.
+func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
+	writer := GinkgoWriter.(*writer.Writer)
+	writer.SetStream(config.DefaultReporterConfig.Verbose)
+	reporters := make([]reporters.Reporter, len(specReporters))
+	for i, reporter := range specReporters {
+		reporters[i] = reporter
+	}
+	passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
+	if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+		fmt.Println("PASS | FOCUSED")
+		os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+	}
+	return passed
+}
+
+func buildDefaultReporter() Reporter {
+	remoteReportingServer := config.GinkgoConfig.StreamHost
+	if remoteReportingServer == "" {
+		stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
+		return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
+	} else {
+		debugFile := ""
+		if config.GinkgoConfig.DebugParallel {
+			debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
+		}
+		return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
+	}
+}
+
+//Skip notifies Ginkgo that the current spec was skipped.
+func Skip(message string, callerSkip ...int) {
+	skip := 0
+	if len(callerSkip) > 0 {
+		skip = callerSkip[0]
+	}
+
+	globalFailer.Skip(message, codelocation.New(skip+1))
+	panic(GINKGO_PANIC)
+}
+
+//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
+func Fail(message string, callerSkip ...int) {
+	skip := 0
+	if len(callerSkip) > 0 {
+		skip = callerSkip[0]
+	}
+
+	globalFailer.Fail(message, codelocation.New(skip+1))
+	panic(GINKGO_PANIC)
+}
+
+//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
+//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
+//calls out to Gomega
+//
+//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
+//further assertions from running. This panic must be recovered. Ginkgo does this for you
+//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
+//
+//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
+//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
+func GinkgoRecover() {
+	e := recover()
+	if e != nil {
+		globalFailer.Panic(codelocation.New(1), e)
+	}
+}
+
+//Describe blocks allow you to organize your specs. A Describe block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
+//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts and Whens.
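+//
+//For example:
+//	Describe("the strings package", func() {
+//		Context("when the string is empty", func() {
+//			It("reports a length of zero", func() {
+//				// assertions go here
+//			})
+//		})
+//	})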
+func Describe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+	return true
+}
+
+//You can focus the tests within a describe block using FDescribe
+func FDescribe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using PDescribe
+func PDescribe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using XDescribe
+func XDescribe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//Context blocks allow you to organize your specs. A Context block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
+//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts and Whens.
+func Context(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+	return true
+}
+
+//You can focus the tests within a describe block using FContext
+func FContext(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using PContext
+func PContext(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using XContext
+func XContext(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//When blocks allow you to organize your specs. A When block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
+//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts and Whens.
+func When(text string, body func()) bool {
+	globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
+	return true
+}
+
+//You can focus the tests within a describe block using FWhen
+func FWhen(text string, body func()) bool {
+	globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using PWhen
+func PWhen(text string, body func()) bool {
+	globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using XWhen
+func XWhen(text string, body func()) bool {
+	globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
+//within an It block.
+//
+//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
+//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
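+//
+//For example (an illustrative sketch; Compute is a made-up function and Ω is a Gomega assertion):
+//
+//	It("computes the answer", func() {
+//		Ω(Compute()).Should(Equal(42))
+//	})
+//
+//	It("eventually closes the done channel", func(done Done) {
+//		go func() {
+//			//...do something asynchronous, then:
+//			close(done)
+//		}()
+//	}, 0.2)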
+func It(text string, body interface{}, timeout ...float64) bool {
+	globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//You can focus individual Its using FIt
+func FIt(text string, body interface{}, timeout ...float64) bool {
+	globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//You can mark Its as pending using PIt
+func PIt(text string, _ ...interface{}) bool {
+	globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//You can mark Its as pending using XIt
+func XIt(text string, _ ...interface{}) bool {
+	globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//Specify blocks are aliases for It blocks and allow for more natural wording in situations in
+//which "It" does not fit into a natural sentence flow. All the protocols that apply to It blocks
+//also apply to Specify blocks.
+func Specify(text string, body interface{}, timeout ...float64) bool {
+	globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//You can focus individual Specifys using FSpecify
+func FSpecify(text string, body interface{}, timeout ...float64) bool {
+	globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//You can mark Specifys as pending using PSpecify
+func PSpecify(text string, is ...interface{}) bool {
+	globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//You can mark Specifys as pending using XSpecify
+func XSpecify(text string, is ...interface{}) bool {
+	globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//By allows you to better document large Its.
+//
+//Generally you should try to keep your Its short and to the point. This is not always possible, however,
+//especially in the context of integration tests that capture a particular workflow.
+//
+//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
+//By will simply log the passed-in text to the GinkgoWriter. If By is handed a function, it will run that function immediately.
+func By(text string, callbacks ...func()) {
+	preamble := "\x1b[1mSTEP\x1b[0m"
+	if config.DefaultReporterConfig.NoColor {
+		preamble = "STEP"
+	}
+	fmt.Fprintln(GinkgoWriter, preamble+": "+text)
+	if len(callbacks) == 1 {
+		callbacks[0]()
+	}
+	if len(callbacks) > 1 {
+		panic("just one callback per By, please")
+	}
+}
+
+//Measure blocks run the passed-in body function repeatedly (determined by the samples argument)
+//and accumulate metrics provided to the Benchmarker by the body function.
+//
+//The body function must have the signature:
+//	func(b Benchmarker)
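+//
+//For example (an illustrative sketch; SomethingHard and jobsProcessed are made up):
+//
+//	Measure("it should do something hard efficiently", func(b Benchmarker) {
+//		runtime := b.Time("runtime", func() {
+//			SomethingHard()
+//		})
+//		Ω(runtime.Seconds()).Should(BeNumerically("<", 0.2), "it shouldn't take too long")
+//		b.RecordValue("jobs processed", float64(jobsProcessed))
+//	}, 10)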
+func Measure(text string, body interface{}, samples int) bool {
+	globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
+	return true
+}
+
+//You can focus individual Measures using FMeasure
+func FMeasure(text string, body interface{}, samples int) bool {
+	globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
+	return true
+}
+
+//You can mark Measurements as pending using PMeasure
+func PMeasure(text string, _ ...interface{}) bool {
+	globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//You can mark Measurements as pending using XMeasure
+func XMeasure(text string, _ ...interface{}) bool {
+	globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
+//parallel node process will call BeforeSuite.
+//
+//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel.
+//
+//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+func BeforeSuite(body interface{}, timeout ...float64) bool {
+	globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
+//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
+//
+//When running in parallel, each parallel node process will call AfterSuite.
+//
+//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel.
+//
+//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+func AfterSuite(body interface{}, timeout ...float64) bool {
+	globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
+//nodes when running tests in parallel. For example, say your tests require a shared database of which only one instance
+//can be started. When running in parallel, only one node should set up the database and all other nodes should wait
+//until that node is done before running.
+//
+//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
+//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
+//to the second function (on all the other nodes).
+//
+//The functions have the following signatures. 
The first function (which only runs on node 1) has the signature:
+//
+//	func() []byte
+//
+//or, to run asynchronously:
+//
+//	func(done Done) []byte
+//
+//The byte array returned by the first function is then passed to the second function, which has the signature:
+//
+//	func(data []byte)
+//
+//or, to run asynchronously:
+//
+//	func(data []byte, done Done)
+//
+//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
+//
+//	var dbClient db.Client
+//	var dbRunner db.Runner
+//
+//	var _ = SynchronizedBeforeSuite(func() []byte {
+//		dbRunner = db.NewRunner()
+//		err := dbRunner.Start()
+//		Ω(err).ShouldNot(HaveOccurred())
+//		return []byte(dbRunner.URL)
+//	}, func(data []byte) {
+//		dbClient = db.NewClient()
+//		err := dbClient.Connect(string(data))
+//		Ω(err).ShouldNot(HaveOccurred())
+//	})
+func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
+	globalSuite.SetSynchronizedBeforeSuiteNode(
+		node1Body,
+		allNodesBody,
+		codelocation.New(1),
+		parseTimeout(timeout...),
+	)
+	return true
+}
+
+//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
+//external singleton resources shared across nodes when running tests in parallel.
+//
+//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
+//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
+//all other nodes are finished.
+//
+//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
+//
+//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
+//only after all nodes have finished:
+//
+//	var _ = SynchronizedAfterSuite(func() {
+//		dbClient.Cleanup()
+//	}, func() {
+//		dbRunner.Stop()
+//	})
+func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
+	globalSuite.SetSynchronizedAfterSuiteNode(
+		allNodesBody,
+		node1Body,
+		codelocation.New(1),
+		parseTimeout(timeout...),
+	)
+	return true
+}
+
+//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
+//Describe and Context blocks the outermost BeforeEach blocks are run first.
+//
+//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
+//a Done channel.
+func BeforeEach(body interface{}, timeout ...float64) bool {
+	globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
+//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
+//
+//Like It blocks, JustBeforeEach blocks can be made asynchronous by providing a body function that accepts
+//a Done channel.
+func JustBeforeEach(body interface{}, timeout ...float64) bool {
+	globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. 
For more details, +//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) +// +//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func JustAfterEach(body interface{}, timeout ...float64) bool { + globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested +//Describe and Context blocks the innermost AfterEach blocks are run first. +// +//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func AfterEach(body interface{}, timeout ...float64) bool { + globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +func parseTimeout(timeout ...float64) time.Duration { + if len(timeout) == 0 { + return time.Duration(defaultTimeout * int64(time.Second)) + } else { + return time.Duration(timeout[0] * float64(time.Second)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go new file mode 100644 index 000000000..fa2f0bf73 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go @@ -0,0 +1,32 @@ +package codelocation + +import ( + "regexp" + "runtime" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo/types" +) + +func New(skip int) types.CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + stackTrace := PruneStack(string(debug.Stack()), skip) + return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} +} + +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + if !re.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go new file mode 100644 index 000000000..0737746dc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go @@ -0,0 +1,151 @@ +package containernode + +import ( + "math/rand" + "sort" + + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type subjectOrContainerNode struct { + containerNode *ContainerNode + subjectNode leafnodes.SubjectNode +} + +func (n subjectOrContainerNode) text() string { + if n.containerNode != nil { + return n.containerNode.Text() + } else { + return n.subjectNode.Text() + } +} + +type CollatedNodes struct { + Containers []*ContainerNode + Subject leafnodes.SubjectNode +} + +type ContainerNode struct { + text string + flag types.FlagType + codeLocation types.CodeLocation + + setupNodes []leafnodes.BasicNode + subjectAndContainerNodes []subjectOrContainerNode +} + +func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode { + return &ContainerNode{ + text: text, + flag: flag, + codeLocation: codeLocation, + } +} + +func (container *ContainerNode) Shuffle(r *rand.Rand) { + sort.Sort(container) 
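+	// Note: sorting by text first gives a deterministic base order, so the seeded
+	// *rand.Rand permutation below reproduces the same shuffle for a given seed.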
+ permutation := r.Perm(len(container.subjectAndContainerNodes)) + shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes)) + for i, j := range permutation { + shuffledNodes[i] = container.subjectAndContainerNodes[j] + } + container.subjectAndContainerNodes = shuffledNodes +} + +func (node *ContainerNode) BackPropagateProgrammaticFocus() bool { + if node.flag == types.FlagTypePending { + return false + } + + shouldUnfocus := false + for _, subjectOrContainerNode := range node.subjectAndContainerNodes { + if subjectOrContainerNode.containerNode != nil { + shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus + } else { + shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus + } + } + + if shouldUnfocus { + if node.flag == types.FlagTypeFocused { + node.flag = types.FlagTypeNone + } + return true + } + + return node.flag == types.FlagTypeFocused +} + +func (node *ContainerNode) Collate() []CollatedNodes { + return node.collate([]*ContainerNode{}) +} + +func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes { + collated := make([]CollatedNodes, 0) + + containers := make([]*ContainerNode, len(enclosingContainers)) + copy(containers, enclosingContainers) + containers = append(containers, node) + + for _, subjectOrContainer := range node.subjectAndContainerNodes { + if subjectOrContainer.containerNode != nil { + collated = append(collated, subjectOrContainer.containerNode.collate(containers)...) + } else { + collated = append(collated, CollatedNodes{ + Containers: containers, + Subject: subjectOrContainer.subjectNode, + }) + } + } + + return collated +} + +func (node *ContainerNode) PushContainerNode(container *ContainerNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container}) +} + +func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject}) +} + +func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) { + node.setupNodes = append(node.setupNodes, setupNode) +} + +func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode { + nodes := []leafnodes.BasicNode{} + for _, setupNode := range node.setupNodes { + if setupNode.Type() == nodeType { + nodes = append(nodes, setupNode) + } + } + return nodes +} + +func (node *ContainerNode) Text() string { + return node.text +} + +func (node *ContainerNode) CodeLocation() types.CodeLocation { + return node.codeLocation +} + +func (node *ContainerNode) Flag() types.FlagType { + return node.flag +} + +//sort.Interface + +func (node *ContainerNode) Len() int { + return len(node.subjectAndContainerNodes) +} + +func (node *ContainerNode) Less(i, j int) bool { + return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text() +} + +func (node *ContainerNode) Swap(i, j int) { + node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i] +} diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go new file mode 100644 index 000000000..678ea2514 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go @@ -0,0 +1,92 @@ +package failer + +import ( + "fmt" + "sync" + 
+ "github.com/onsi/ginkgo/types" +) + +type Failer struct { + lock *sync.Mutex + failure types.SpecFailure + state types.SpecState +} + +func New() *Failer { + return &Failer{ + lock: &sync.Mutex{}, + state: types.SpecStatePassed, + } +} + +func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStatePanicked + f.failure = types.SpecFailure{ + Message: "Test Panicked", + Location: location, + ForwardedPanic: fmt.Sprintf("%v", forwardedPanic), + } + } +} + +func (f *Failer) Timeout(location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateTimedOut + f.failure = types.SpecFailure{ + Message: "Timed out", + Location: location, + } + } +} + +func (f *Failer) Fail(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateFailed + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} + +func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) { + f.lock.Lock() + defer f.lock.Unlock() + + failure := f.failure + outcome := f.state + if outcome != types.SpecStatePassed { + failure.ComponentType = componentType + failure.ComponentIndex = componentIndex + failure.ComponentCodeLocation = componentCodeLocation + } + + f.state = types.SpecStatePassed + f.failure = types.SpecFailure{} + + return failure, outcome +} + +func (f *Failer) Skip(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateSkipped + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go new file mode 100644 index 000000000..d6d54234c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go @@ -0,0 +1,103 @@ +package leafnodes + +import ( + "math" + "time" + + "sync" + + "github.com/onsi/ginkgo/types" +) + +type benchmarker struct { + mu sync.Mutex + measurements map[string]*types.SpecMeasurement + orderCounter int +} + +func newBenchmarker() *benchmarker { + return &benchmarker{ + measurements: make(map[string]*types.SpecMeasurement, 0), + } +} + +func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) { + t := time.Now() + body() + elapsedTime = time.Since(t) + + b.mu.Lock() + defer b.mu.Unlock() + measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...) + measurement.Results = append(measurement.Results, elapsedTime.Seconds()) + + return +} + +func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) { + b.mu.Lock() + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...) + defer b.mu.Unlock() + measurement.Results = append(measurement.Results, value) +} + +func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) { + b.mu.Lock() + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...) 
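+	// (the mutex guards both the measurement lookup above and the append below)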
+ defer b.mu.Unlock() + measurement.Results = append(measurement.Results, value) +} + +func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement { + measurement, ok := b.measurements[name] + if !ok { + var computedInfo interface{} + computedInfo = nil + if len(info) > 0 { + computedInfo = info[0] + } + measurement = &types.SpecMeasurement{ + Name: name, + Info: computedInfo, + Order: b.orderCounter, + SmallestLabel: smallestLabel, + LargestLabel: largestLabel, + AverageLabel: averageLabel, + Units: units, + Precision: precision, + Results: make([]float64, 0), + } + b.measurements[name] = measurement + b.orderCounter++ + } + + return measurement +} + +func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement { + b.mu.Lock() + defer b.mu.Unlock() + for _, measurement := range b.measurements { + measurement.Smallest = math.MaxFloat64 + measurement.Largest = -math.MaxFloat64 + sum := float64(0) + sumOfSquares := float64(0) + + for _, result := range measurement.Results { + if result > measurement.Largest { + measurement.Largest = result + } + if result < measurement.Smallest { + measurement.Smallest = result + } + sum += result + sumOfSquares += result * result + } + + n := float64(len(measurement.Results)) + measurement.Average = sum / n + measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n)) + } + + return b.measurements +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go new file mode 100644 index 000000000..8c3902d60 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go @@ -0,0 +1,19 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/types" +) + +type BasicNode interface { + Type() types.SpecComponentType + Run() (types.SpecState, types.SpecFailure) + CodeLocation() types.CodeLocation +} + +type SubjectNode interface { + BasicNode + + Text() string + Flag() types.FlagType + Samples() int +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go new file mode 100644 index 000000000..6eded7b76 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go @@ -0,0 +1,47 @@ +package leafnodes + +import ( + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type ItNode struct { + runner *runner + + flag types.FlagType + text string +} + +func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode { + return &ItNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex), + flag: flag, + text: text, + } +} + +func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *ItNode) Type() types.SpecComponentType { + return types.SpecComponentTypeIt +} + +func (node *ItNode) Text() string { + return node.text +} + +func (node *ItNode) Flag() types.FlagType { + return node.flag +} + +func (node *ItNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *ItNode) Samples() int { + return 1 +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go 
b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go new file mode 100644 index 000000000..3ab9a6d55 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go @@ -0,0 +1,62 @@ +package leafnodes + +import ( + "reflect" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type MeasureNode struct { + runner *runner + + text string + flag types.FlagType + samples int + benchmarker *benchmarker +} + +func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode { + benchmarker := newBenchmarker() + + wrappedBody := func() { + reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)}) + } + + return &MeasureNode{ + runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex), + + text: text, + flag: flag, + samples: samples, + benchmarker: benchmarker, + } +} + +func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement { + return node.benchmarker.measurementsReport() +} + +func (node *MeasureNode) Type() types.SpecComponentType { + return types.SpecComponentTypeMeasure +} + +func (node *MeasureNode) Text() string { + return node.text +} + +func (node *MeasureNode) Flag() types.FlagType { + return node.flag +} + +func (node *MeasureNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *MeasureNode) Samples() int { + return node.samples +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go new file mode 100644 index 000000000..16cb66c3e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go @@ -0,0 +1,117 @@ +package leafnodes + +import ( + "fmt" + "reflect" + "time" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type runner struct { + isAsync bool + asyncFunc func(chan<- interface{}) + syncFunc func() + codeLocation types.CodeLocation + timeoutThreshold time.Duration + nodeType types.SpecComponentType + componentIndex int + failer *failer.Failer +} + +func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner { + bodyType := reflect.TypeOf(body) + if bodyType.Kind() != reflect.Func { + panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation)) + } + + runner := &runner{ + codeLocation: codeLocation, + timeoutThreshold: timeout, + failer: failer, + nodeType: nodeType, + componentIndex: componentIndex, + } + + switch bodyType.NumIn() { + case 0: + runner.syncFunc = body.(func()) + return runner + case 1: + if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) { + panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation)) + } + + wrappedBody := func(done chan<- interface{}) { + bodyValue := reflect.ValueOf(body) + bodyValue.Call([]reflect.Value{reflect.ValueOf(done)}) + } + + runner.isAsync = true + runner.asyncFunc = wrappedBody + return runner + } + + panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation)) +} + +func (r *runner) run() (outcome types.SpecState, failure 
types.SpecFailure) { + if r.isAsync { + return r.runAsync() + } else { + return r.runSync() + } +} + +func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) { + done := make(chan interface{}, 1) + + go func() { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + select { + case <-done: + break + default: + close(done) + } + } + }() + + r.asyncFunc(done) + finished = true + }() + + // If this goroutine gets no CPU time before the select block, + // the <-done case may complete even if the test took longer than the timeoutThreshold. + // This can cause flaky behaviour, but we haven't seen it in the wild. + select { + case <-done: + case <-time.After(r.timeoutThreshold): + r.failer.Timeout(r.codeLocation) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + return +} +func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + }() + + r.syncFunc() + finished = true + + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go new file mode 100644 index 000000000..e3e9cb7c5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go @@ -0,0 +1,48 @@ +package leafnodes + +import ( + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type SetupNode struct { + runner *runner +} + +func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *SetupNode) Type() types.SpecComponentType { + return node.runner.nodeType +} + +func (node *SetupNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex), + } +} + +func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex), + } +} + +func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex), + } +} + +func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go new file mode 100644 index 000000000..80f16ed78 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go @@ -0,0 +1,55 @@ +package leafnodes + +import ( + "time" + + 
"github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type SuiteNode interface { + Run(parallelNode int, parallelTotal int, syncHost string) bool + Passed() bool + Summary() *types.SetupSummary +} + +type simpleSuiteNode struct { + runner *runner + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + node.outcome, node.failure = node.runner.run() + node.runTime = time.Since(t) + + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runner.nodeType, + CodeLocation: node.runner.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0), + } +} + +func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go new file mode 100644 index 000000000..a721d0cf7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go @@ -0,0 +1,90 @@ +package leafnodes + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type synchronizedAfterSuiteNode struct { + runnerA *runner + runnerB *runner + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &synchronizedAfterSuiteNode{ + runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} + +func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + node.outcome, node.failure = node.runnerA.run() + + if parallelNode == 1 { + if parallelTotal > 1 { + node.waitUntilOtherNodesAreDone(syncHost) + } + + outcome, failure := node.runnerB.run() + + if node.outcome == types.SpecStatePassed { + node.outcome, node.failure = outcome, failure + } + } + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) { + for { + if node.canRun(syncHost) { + return + } + + 
time.Sleep(50 * time.Millisecond) + } +} + +func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool { + resp, err := http.Get(syncHost + "/RemoteAfterSuiteData") + if err != nil || resp.StatusCode != http.StatusOK { + return false + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false + } + resp.Body.Close() + + afterSuiteData := types.RemoteAfterSuiteData{} + err = json.Unmarshal(body, &afterSuiteData) + if err != nil { + return false + } + + return afterSuiteData.CanRun +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go new file mode 100644 index 000000000..d5c889319 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go @@ -0,0 +1,181 @@ +package leafnodes + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "reflect" + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type synchronizedBeforeSuiteNode struct { + runnerA *runner + runnerB *runner + + data []byte + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + node := &synchronizedBeforeSuiteNode{} + + node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + + return node +} + +func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + defer func() { + node.runTime = time.Since(t) + }() + + if parallelNode == 1 { + node.outcome, node.failure = node.runA(parallelTotal, syncHost) + } else { + node.outcome, node.failure = node.waitForA(syncHost) + } + + if node.outcome != types.SpecStatePassed { + return false + } + node.outcome, node.failure = node.runnerB.run() + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) { + outcome, failure := node.runnerA.run() + + if parallelTotal > 1 { + state := types.RemoteBeforeSuiteStatePassed + if outcome != types.SpecStatePassed { + state = types.RemoteBeforeSuiteStateFailed + } + json := (types.RemoteBeforeSuiteData{ + Data: node.data, + State: state, + }).ToJSON() + http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json)) + } + + return outcome, failure +} + +func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) { + failure := func(message string) types.SpecFailure { + return types.SpecFailure{ + Message: message, + Location: node.runnerA.codeLocation, + ComponentType: node.runnerA.nodeType, + ComponentIndex: node.runnerA.componentIndex, + ComponentCodeLocation: node.runnerA.codeLocation, + } + } + for { + resp, err := http.Get(syncHost + "/BeforeSuiteState") + if err != nil || resp.StatusCode != http.StatusOK { + return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state") + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return types.SpecStateFailed, failure("Failed to read BeforeSuite state") + } + resp.Body.Close() + + beforeSuiteData := 
types.RemoteBeforeSuiteData{} + err = json.Unmarshal(body, &beforeSuiteData) + if err != nil { + return types.SpecStateFailed, failure("Failed to decode BeforeSuite state") + } + + switch beforeSuiteData.State { + case types.RemoteBeforeSuiteStatePassed: + node.data = beforeSuiteData.Data + return types.SpecStatePassed, types.SpecFailure{} + case types.RemoteBeforeSuiteStateFailed: + return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed") + case types.RemoteBeforeSuiteStateDisappeared: + return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite") + } + + time.Sleep(50 * time.Millisecond) + } +} + +func (node *synchronizedBeforeSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} { + typeA := reflect.TypeOf(bodyA) + if typeA.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its first argument") + } + + takesNothing := typeA.NumIn() == 0 + takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface + returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8 + + if !((takesNothing || takesADoneChannel) && returnsBytes) { + panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.") + } + + if takesADoneChannel { + return func(done chan<- interface{}) { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)}) + node.data = out[0].Interface().([]byte) + } + } + + return func() { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{}) + node.data = out[0].Interface().([]byte) + } +} + +func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} { + typeB := reflect.TypeOf(bodyB) + if typeB.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its second argument") + } + + returnsNothing := typeB.NumOut() == 0 + takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 + takesBytesAndDone := typeB.NumIn() == 2 && + typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 && + typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface + + if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) { + panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)") + } + + if takesBytesAndDone { + return func(done chan<- interface{}) { + reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)}) + } + } + + return func() { + reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)}) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go new file mode 100644 index 000000000..6b54afe01 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -0,0 +1,249 @@ +/* + +Aggregator is a reporter used by the Ginkgo CLI to aggregate and present 
parallel test output +coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel: + + ginkgo -nodes=N + +where N is the number of nodes you desire. +*/ +package remote + +import ( + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type configAndSuite struct { + config config.GinkgoConfigType + summary *types.SuiteSummary +} + +type Aggregator struct { + nodeCount int + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + result chan bool + + suiteBeginnings chan configAndSuite + aggregatedSuiteBeginnings []configAndSuite + + beforeSuites chan *types.SetupSummary + aggregatedBeforeSuites []*types.SetupSummary + + afterSuites chan *types.SetupSummary + aggregatedAfterSuites []*types.SetupSummary + + specCompletions chan *types.SpecSummary + completedSpecs []*types.SpecSummary + + suiteEndings chan *types.SuiteSummary + aggregatedSuiteEndings []*types.SuiteSummary + specs []*types.SpecSummary + + startTime time.Time +} + +func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator { + aggregator := &Aggregator{ + nodeCount: nodeCount, + result: result, + config: config, + stenographer: stenographer, + + suiteBeginnings: make(chan configAndSuite, 0), + beforeSuites: make(chan *types.SetupSummary, 0), + afterSuites: make(chan *types.SetupSummary, 0), + specCompletions: make(chan *types.SpecSummary, 0), + suiteEndings: make(chan *types.SuiteSummary, 0), + } + + go aggregator.mux() + + return aggregator +} + +func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + aggregator.suiteBeginnings <- configAndSuite{config, summary} +} + +func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.beforeSuites <- setupSummary +} + +func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.afterSuites <- setupSummary +} + +func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { + //noop +} + +func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { + aggregator.specCompletions <- specSummary +} + +func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { + aggregator.suiteEndings <- summary +} + +func (aggregator *Aggregator) mux() { +loop: + for { + select { + case configAndSuite := <-aggregator.suiteBeginnings: + aggregator.registerSuiteBeginning(configAndSuite) + case setupSummary := <-aggregator.beforeSuites: + aggregator.registerBeforeSuite(setupSummary) + case setupSummary := <-aggregator.afterSuites: + aggregator.registerAfterSuite(setupSummary) + case specSummary := <-aggregator.specCompletions: + aggregator.registerSpecCompletion(specSummary) + case suite := <-aggregator.suiteEndings: + finished, passed := aggregator.registerSuiteEnding(suite) + if finished { + aggregator.result <- passed + break loop + } + } + } +} + +func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) { + aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite) + + if len(aggregator.aggregatedSuiteBeginnings) == 1 { + aggregator.startTime = time.Now() + } + + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, 
configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) + + totalNumberOfSpecs := 0 + if len(aggregator.aggregatedSuiteBeginnings) > 0 { + totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization + } + + aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct) + aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) { + aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary) + aggregator.specs = append(aggregator.specs, specSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) flushCompletedSpecs() { + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + for _, setupSummary := range aggregator.aggregatedBeforeSuites { + aggregator.announceBeforeSuite(setupSummary) + } + + for _, specSummary := range aggregator.completedSpecs { + aggregator.announceSpec(specSummary) + } + + for _, setupSummary := range aggregator.aggregatedAfterSuites { + aggregator.announceAfterSuite(setupSummary) + } + + aggregator.aggregatedBeforeSuites = []*types.SetupSummary{} + aggregator.completedSpecs = []*types.SpecSummary{} + aggregator.aggregatedAfterSuites = []*types.SetupSummary{} +} + +func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) { + aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { + if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + aggregator.stenographer.AnnounceSpecWillRun(specSummary) + } + + aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) + + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct) + } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold { + aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct) + } else { + aggregator.stenographer.AnnounceSuccesfulSpec(specSummary) + } + + case types.SpecStatePending: + aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct) + case 
types.SpecStateSkipped:
+		aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
+	case types.SpecStateTimedOut:
+		aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+	case types.SpecStatePanicked:
+		aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+	case types.SpecStateFailed:
+		aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+	}
+}
+
+func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
+	aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
+	if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
+		return false, false
+	}
+
+	aggregatedSuiteSummary := &types.SuiteSummary{}
+	aggregatedSuiteSummary.SuiteSucceeded = true
+
+	for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
+		if suiteSummary.SuiteSucceeded == false {
+			aggregatedSuiteSummary.SuiteSucceeded = false
+		}
+
+		aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
+		aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
+		aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
+		aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
+		aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
+		aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
+		aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
+	}
+
+	aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
+
+	aggregator.stenographer.SummarizeFailures(aggregator.specs)
+	aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
+
+	return true, aggregatedSuiteSummary.SuiteSucceeded
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
new file mode 100644
index 000000000..284bc62e5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
@@ -0,0 +1,147 @@
+package remote
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+
+	"github.com/onsi/ginkgo/internal/writer"
+	"github.com/onsi/ginkgo/reporters"
+	"github.com/onsi/ginkgo/reporters/stenographer"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/types"
+)
+
+//An interface to net/http's client to allow the injection of fakes under test
+type Poster interface {
+	Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
+}
+
+/*
+The ForwardingReporter is a Ginkgo reporter that forwards information to
+a Ginkgo remote server.
+
+When streaming parallel test output, this reporter is automatically installed by Ginkgo.
+
+This is accomplished by passing the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner
+detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
+in place of Ginkgo's DefaultReporter.
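+
+For example, a sketch of the mechanism (illustrative only -- the address is made up, and the
+variable is normally set by the Ginkgo CLI rather than typed by hand):
+
+	GINKGO_REMOTE_REPORTING_SERVER=http://127.0.0.1:6688 go test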
+*/ + +type ForwardingReporter struct { + serverHost string + poster Poster + outputInterceptor OutputInterceptor + debugMode bool + debugFile *os.File + nestedReporter *reporters.DefaultReporter +} + +func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter { + reporter := &ForwardingReporter{ + serverHost: serverHost, + poster: poster, + outputInterceptor: outputInterceptor, + } + + if debugFile != "" { + var err error + reporter.debugMode = true + reporter.debugFile, err = os.Create(debugFile) + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + if !config.Verbose { + //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication. + ginkgoWriter.AndRedirectTo(reporter.debugFile) + } + outputInterceptor.StreamTo(reporter.debugFile) //This is not working + + stenographer := stenographer.New(false, true, reporter.debugFile) + config.Succinct = false + config.Verbose = true + config.FullTrace = true + reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer) + } + + return reporter +} + +func (reporter *ForwardingReporter) post(path string, data interface{}) { + encoded, _ := json.Marshal(data) + buffer := bytes.NewBuffer(encoded) + reporter.poster.Post(reporter.serverHost+path, "application/json", buffer) +} + +func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) { + data := struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + }{ + conf, + summary, + } + + reporter.outputInterceptor.StartInterceptingOutput() + if reporter.debugMode { + reporter.nestedReporter.SpecSuiteWillBegin(conf, summary) + reporter.debugFile.Sync() + } + reporter.post("/SpecSuiteWillBegin", data) +} + +func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.BeforeSuiteDidRun(setupSummary) + reporter.debugFile.Sync() + } + reporter.post("/BeforeSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { + if reporter.debugMode { + reporter.nestedReporter.SpecWillRun(specSummary) + reporter.debugFile.Sync() + } + reporter.post("/SpecWillRun", specSummary) +} + +func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + specSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.SpecDidComplete(specSummary) + reporter.debugFile.Sync() + } + reporter.post("/SpecDidComplete", specSummary) +} + +func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.AfterSuiteDidRun(setupSummary) + reporter.debugFile.Sync() + } + reporter.post("/AfterSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) 
SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.outputInterceptor.StopInterceptingAndReturnOutput() + if reporter.debugMode { + reporter.nestedReporter.SpecSuiteDidEnd(summary) + reporter.debugFile.Sync() + } + reporter.post("/SpecSuiteDidEnd", summary) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go new file mode 100644 index 000000000..5154abe87 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go @@ -0,0 +1,13 @@ +package remote + +import "os" + +/* +The OutputInterceptor is used by the ForwardingReporter to +intercept and capture all stdin and stderr output during a test run. +*/ +type OutputInterceptor interface { + StartInterceptingOutput() error + StopInterceptingAndReturnOutput() (string, error) + StreamTo(*os.File) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go new file mode 100644 index 000000000..ab6622a29 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -0,0 +1,83 @@ +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package remote + +import ( + "errors" + "io/ioutil" + "os" + + "github.com/hpcloud/tail" +) + +func NewOutputInterceptor() OutputInterceptor { + return &outputInterceptor{} +} + +type outputInterceptor struct { + redirectFile *os.File + streamTarget *os.File + intercepting bool + tailer *tail.Tail + doneTailing chan bool +} + +func (interceptor *outputInterceptor) StartInterceptingOutput() error { + if interceptor.intercepting { + return errors.New("Already intercepting output!") + } + interceptor.intercepting = true + + var err error + + interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output") + if err != nil { + return err + } + + // Call a function in ./syscall_dup_*.go + // If building for everything other than linux_arm64, + // use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2) + // call syscall.Dup3(oldfd, newfd, 0). 
They are nearly identical, see: http://linux.die.net/man/2/dup3
+	syscallDup(int(interceptor.redirectFile.Fd()), 1)
+	syscallDup(int(interceptor.redirectFile.Fd()), 2)
+
+	if interceptor.streamTarget != nil {
+		interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
+		interceptor.doneTailing = make(chan bool)
+
+		go func() {
+			for line := range interceptor.tailer.Lines {
+				interceptor.streamTarget.Write([]byte(line.Text + "\n"))
+			}
+			close(interceptor.doneTailing)
+		}()
+	}
+
+	return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+	if !interceptor.intercepting {
+		return "", errors.New("Not intercepting output!")
+	}
+
+	interceptor.redirectFile.Close()
+	output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
+	os.Remove(interceptor.redirectFile.Name())
+
+	interceptor.intercepting = false
+
+	if interceptor.streamTarget != nil {
+		interceptor.tailer.Stop()
+		interceptor.tailer.Cleanup()
+		<-interceptor.doneTailing
+		interceptor.streamTarget.Sync()
+	}
+
+	return string(output), err
+}
+
+func (interceptor *outputInterceptor) StreamTo(out *os.File) {
+	interceptor.streamTarget = out
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
new file mode 100644
index 000000000..40c790336
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package remote
+
+import (
+	"errors"
+	"os"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+	return &outputInterceptor{}
+}
+
+type outputInterceptor struct {
+	intercepting bool
+}
+
+func (interceptor *outputInterceptor) StartInterceptingOutput() error {
+	if interceptor.intercepting {
+		return errors.New("Already intercepting output!")
+	}
+	interceptor.intercepting = true
+
+	// output interception is not implemented on windows...
+
+	return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+	// output interception is not implemented on windows...
+	interceptor.intercepting = false
+
+	return "", nil
+}
+
+func (interceptor *outputInterceptor) StreamTo(*os.File) {}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
new file mode 100644
index 000000000..367c54daf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
@@ -0,0 +1,224 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package remote
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"sync"
+
+	"github.com/onsi/ginkgo/internal/spec_iterator"
+
+	"github.com/onsi/ginkgo/config"
+	"github.com/onsi/ginkgo/reporters"
+	"github.com/onsi/ginkgo/types"
+)
+
+/*
+Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
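+
+A minimal lifecycle sketch (parallelTotal and aggregator stand in for whatever
+the caller has at hand; error handling is abbreviated):
+
+	server, err := NewServer(parallelTotal)
+	if err != nil {
+		return err // could not open a local listener
+	}
+	server.RegisterReporters(aggregator) // receives the forwarded events
+	server.Start()                       // returns immediately; serves in a goroutine
+	defer server.Close()
+	host := server.Address() // hand this to each node's ForwardingReporter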
+*/
+type Server struct {
+	listener        net.Listener
+	reporters       []reporters.Reporter
+	alives          []func() bool
+	lock            *sync.Mutex
+	beforeSuiteData types.RemoteBeforeSuiteData
+	parallelTotal   int
+	counter         int
+}
+
+//NewServer creates a new server, automatically selecting a port
+func NewServer(parallelTotal int) (*Server, error) {
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		return nil, err
+	}
+	return &Server{
+		listener:        listener,
+		lock:            &sync.Mutex{},
+		alives:          make([]func() bool, parallelTotal),
+		beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
+		parallelTotal:   parallelTotal,
+	}, nil
+}
+
+//Start starts the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *Server) Start() {
+	httpServer := &http.Server{}
+	mux := http.NewServeMux()
+	httpServer.Handler = mux
+
+	//streaming endpoints
+	mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
+	mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
+	mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
+	mux.HandleFunc("/SpecWillRun", server.specWillRun)
+	mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
+	mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
+
+	//synchronization endpoints
+	mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
+	mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
+	mux.HandleFunc("/counter", server.handleCounter)
+	mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
+
+	go httpServer.Serve(server.listener)
+}
+
+//Close stops the server
+func (server *Server) Close() {
+	server.listener.Close()
+}
+
+//Address is the address the server can be reached at. Pass this into the `ForwardingReporter`.
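+//(NewServer listens on 127.0.0.1:0, so this is a loopback address with an
+//OS-assigned port, reachable only from the local machine.)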
+func (server *Server) Address() string { + return "http://" + server.listener.Addr().String() +} + +// +// Streaming Endpoints +// + +//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *Server) readAll(request *http.Request) []byte { + defer request.Body.Close() + body, _ := ioutil.ReadAll(request.Body) + return body +} + +func (server *Server) RegisterReporters(reporters ...reporters.Reporter) { + server.reporters = reporters +} + +func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + + var data struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + } + + json.Unmarshal(body, &data) + + for _, reporter := range server.reporters { + reporter.SpecSuiteWillBegin(data.Config, data.Summary) + } +} + +func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.BeforeSuiteDidRun(setupSummary) + } +} + +func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.AfterSuiteDidRun(setupSummary) + } +} + +func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + reporter.SpecWillRun(specSummary) + } +} + +func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + reporter.SpecDidComplete(specSummary) + } +} + +func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var suiteSummary *types.SuiteSummary + json.Unmarshal(body, &suiteSummary) + + for _, reporter := range server.reporters { + reporter.SpecSuiteDidEnd(suiteSummary) + } +} + +// +// Synchronization Endpoints +// + +func (server *Server) RegisterAlive(node int, alive func() bool) { + server.lock.Lock() + defer server.lock.Unlock() + server.alives[node-1] = alive +} + +func (server *Server) nodeIsAlive(node int) bool { + server.lock.Lock() + defer server.lock.Unlock() + alive := server.alives[node-1] + if alive == nil { + return true + } + return alive() +} + +func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + if request.Method == "POST" { + dec := json.NewDecoder(request.Body) + dec.Decode(&(server.beforeSuiteData)) + } else { + beforeSuiteData := server.beforeSuiteData + if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) { + beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared + } + enc := json.NewEncoder(writer) + enc.Encode(beforeSuiteData) + } +} + +func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) { + afterSuiteData := types.RemoteAfterSuiteData{ + CanRun: true, + } + for i := 2; i <= server.parallelTotal; i++ { + afterSuiteData.CanRun = 
afterSuiteData.CanRun && !server.nodeIsAlive(i) + } + + enc := json.NewEncoder(writer) + enc.Encode(afterSuiteData) +} + +func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) { + c := spec_iterator.Counter{} + server.lock.Lock() + c.Index = server.counter + server.counter = server.counter + 1 + server.lock.Unlock() + + json.NewEncoder(writer).Encode(c) +} + +func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) { + writer.Write([]byte("")) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go new file mode 100644 index 000000000..9550d37b3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go @@ -0,0 +1,11 @@ +// +build linux,arm64 + +package remote + +import "syscall" + +// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so +// use the nearly identical syscall.Dup3 instead +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup3(oldfd, newfd, 0) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go new file mode 100644 index 000000000..75ef7fb78 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go @@ -0,0 +1,9 @@ +// +build solaris + +package remote + +import "golang.org/x/sys/unix" + +func syscallDup(oldfd int, newfd int) (err error) { + return unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go new file mode 100644 index 000000000..ef6255960 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go @@ -0,0 +1,11 @@ +// +build !linux !arm64 +// +build !windows +// +build !solaris + +package remote + +import "syscall" + +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go new file mode 100644 index 000000000..7fd68ee8e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go @@ -0,0 +1,247 @@ +package spec + +import ( + "fmt" + "io" + "time" + + "sync" + + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type Spec struct { + subject leafnodes.SubjectNode + focused bool + announceProgress bool + + containers []*containernode.ContainerNode + + state types.SpecState + runTime time.Duration + startTime time.Time + failure types.SpecFailure + previousFailures bool + + stateMutex *sync.Mutex +} + +func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec { + spec := &Spec{ + subject: subject, + containers: containers, + focused: subject.Flag() == types.FlagTypeFocused, + announceProgress: announceProgress, + stateMutex: &sync.Mutex{}, + } + + spec.processFlag(subject.Flag()) + for i := len(containers) - 1; i >= 0; i-- { + spec.processFlag(containers[i].Flag()) + } + + return spec +} + +func (spec *Spec) processFlag(flag types.FlagType) { + if flag == types.FlagTypeFocused { + spec.focused = true + } else if flag == types.FlagTypePending { + spec.setState(types.SpecStatePending) + } +} + +func (spec *Spec) Skip() { + spec.setState(types.SpecStateSkipped) +} + +func 
(spec *Spec) Failed() bool { + return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut +} + +func (spec *Spec) Passed() bool { + return spec.getState() == types.SpecStatePassed +} + +func (spec *Spec) Flaked() bool { + return spec.getState() == types.SpecStatePassed && spec.previousFailures +} + +func (spec *Spec) Pending() bool { + return spec.getState() == types.SpecStatePending +} + +func (spec *Spec) Skipped() bool { + return spec.getState() == types.SpecStateSkipped +} + +func (spec *Spec) Focused() bool { + return spec.focused +} + +func (spec *Spec) IsMeasurement() bool { + return spec.subject.Type() == types.SpecComponentTypeMeasure +} + +func (spec *Spec) Summary(suiteID string) *types.SpecSummary { + componentTexts := make([]string, len(spec.containers)+1) + componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1) + + for i, container := range spec.containers { + componentTexts[i] = container.Text() + componentCodeLocations[i] = container.CodeLocation() + } + + componentTexts[len(spec.containers)] = spec.subject.Text() + componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation() + + runTime := spec.runTime + if runTime == 0 && !spec.startTime.IsZero() { + runTime = time.Since(spec.startTime) + } + + return &types.SpecSummary{ + IsMeasurement: spec.IsMeasurement(), + NumberOfSamples: spec.subject.Samples(), + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.getState(), + RunTime: runTime, + Failure: spec.failure, + Measurements: spec.measurementsReport(), + SuiteID: suiteID, + } +} + +func (spec *Spec) ConcatenatedString() string { + s := "" + for _, container := range spec.containers { + s += container.Text() + " " + } + + return s + spec.subject.Text() +} + +func (spec *Spec) Run(writer io.Writer) { + if spec.getState() == types.SpecStateFailed { + spec.previousFailures = true + } + + spec.startTime = time.Now() + defer func() { + spec.runTime = time.Since(spec.startTime) + }() + + for sample := 0; sample < spec.subject.Samples(); sample++ { + spec.runSample(sample, writer) + + if spec.getState() != types.SpecStatePassed { + return + } + } +} + +func (spec *Spec) getState() types.SpecState { + spec.stateMutex.Lock() + defer spec.stateMutex.Unlock() + return spec.state +} + +func (spec *Spec) setState(state types.SpecState) { + spec.stateMutex.Lock() + defer spec.stateMutex.Unlock() + spec.state = state +} + +func (spec *Spec) runSample(sample int, writer io.Writer) { + spec.setState(types.SpecStatePassed) + spec.failure = types.SpecFailure{} + innerMostContainerIndexToUnwind := -1 + + defer func() { + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { + container := spec.containers[i] + for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) { + spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach) + justAfterEachState, justAfterEachFailure := justAfterEach.Run() + if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed { + spec.state = justAfterEachState + spec.failure = justAfterEachFailure + } + } + } + + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { + container := spec.containers[i] + for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { + spec.announceSetupNode(writer, "AfterEach", container, afterEach) + afterEachState, afterEachFailure := afterEach.Run() + if 
afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed { + spec.setState(afterEachState) + spec.failure = afterEachFailure + } + } + } + }() + + for i, container := range spec.containers { + innerMostContainerIndexToUnwind = i + for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { + spec.announceSetupNode(writer, "BeforeEach", container, beforeEach) + s, f := beforeEach.Run() + spec.failure = f + spec.setState(s) + if spec.getState() != types.SpecStatePassed { + return + } + } + } + + for _, container := range spec.containers { + for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { + spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach) + s, f := justBeforeEach.Run() + spec.failure = f + spec.setState(s) + if spec.getState() != types.SpecStatePassed { + return + } + } + } + + spec.announceSubject(writer, spec.subject) + s, f := spec.subject.Run() + spec.failure = f + spec.setState(s) +} + +func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) { + if spec.announceProgress { + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) { + if spec.announceProgress { + nodeType := "" + switch subject.Type() { + case types.SpecComponentTypeIt: + nodeType = "It" + case types.SpecComponentTypeMeasure: + nodeType = "Measure" + } + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement { + if !spec.IsMeasurement() || spec.Failed() { + return map[string]*types.SpecMeasurement{} + } + + return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport() +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go new file mode 100644 index 000000000..27c0d1d6c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -0,0 +1,144 @@ +package spec + +import ( + "math/rand" + "regexp" + "sort" +) + +type Specs struct { + specs []*Spec + names []string + + hasProgrammaticFocus bool + RegexScansFilePath bool +} + +func NewSpecs(specs []*Spec) *Specs { + names := make([]string, len(specs)) + for i, spec := range specs { + names[i] = spec.ConcatenatedString() + } + return &Specs{ + specs: specs, + names: names, + } +} + +func (e *Specs) Specs() []*Spec { + return e.specs +} + +func (e *Specs) HasProgrammaticFocus() bool { + return e.hasProgrammaticFocus +} + +func (e *Specs) Shuffle(r *rand.Rand) { + sort.Sort(e) + permutation := r.Perm(len(e.specs)) + shuffledSpecs := make([]*Spec, len(e.specs)) + names := make([]string, len(e.specs)) + for i, j := range permutation { + shuffledSpecs[i] = e.specs[j] + names[i] = e.names[j] + } + e.specs = shuffledSpecs + e.names = names +} + +func (e *Specs) ApplyFocus(description string, focusString string, skipString string) { + if focusString == "" && skipString == "" { + e.applyProgrammaticFocus() + } else { + e.applyRegExpFocusAndSkip(description, focusString, skipString) + } +} + +func (e *Specs) applyProgrammaticFocus() { + e.hasProgrammaticFocus = false + for _, spec := range e.specs { + if spec.Focused() && !spec.Pending() { + e.hasProgrammaticFocus = true + break + } 
+	}
+
+	if e.hasProgrammaticFocus {
+		for _, spec := range e.specs {
+			if !spec.Focused() {
+				spec.Skip()
+			}
+		}
+	}
+}
+
+// toMatch returns a []byte to be used by regex matchers. When adding new behaviours to the matching function,
+// this is the place to which we append.
+func (e *Specs) toMatch(description string, i int) []byte {
+	if i >= len(e.names) {
+		return nil
+	}
+	if e.RegexScansFilePath {
+		return []byte(
+			description + " " +
+				e.names[i] + " " +
+				e.specs[i].subject.CodeLocation().FileName)
+	} else {
+		return []byte(
+			description + " " +
+				e.names[i])
+	}
+}
+
+func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) {
+	var focusFilter *regexp.Regexp
+	if focusString != "" {
+		focusFilter = regexp.MustCompile(focusString)
+	}
+	var skipFilter *regexp.Regexp
+	if skipString != "" {
+		skipFilter = regexp.MustCompile(skipString)
+	}
+
+	for i, spec := range e.specs {
+		matchesFocus := true
+		matchesSkip := false
+
+		toMatch := e.toMatch(description, i)
+
+		if focusFilter != nil {
+			matchesFocus = focusFilter.Match(toMatch)
+		}
+
+		if skipFilter != nil {
+			matchesSkip = skipFilter.Match(toMatch)
+		}
+
+		if !matchesFocus || matchesSkip {
+			spec.Skip()
+		}
+	}
+}
+
+func (e *Specs) SkipMeasurements() {
+	for _, spec := range e.specs {
+		if spec.IsMeasurement() {
+			spec.Skip()
+		}
+	}
+}
+
+//sort.Interface
+
+func (e *Specs) Len() int {
+	return len(e.specs)
+}
+
+func (e *Specs) Less(i, j int) bool {
+	return e.names[i] < e.names[j]
+}
+
+func (e *Specs) Swap(i, j int) {
+	e.names[i], e.names[j] = e.names[j], e.names[i]
+	e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
new file mode 100644
index 000000000..82272554a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
@@ -0,0 +1,55 @@
+package spec_iterator
+
+func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
+	if length == 0 {
+		return 0, 0
+	}
+
+	// We have more nodes than tests. Trivial case.
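+	// (e.g. 3 specs across 5 nodes: nodes 1, 2 and 3 each run one spec,
+	// nodes 4 and 5 run none)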
+ if parallelTotal >= length { + if parallelNode > length { + return 0, 0 + } else { + return parallelNode - 1, 1 + } + } + + // This is the minimum amount of tests that a node will be required to run + minTestsPerNode := length / parallelTotal + + // This is the maximum amount of tests that a node will be required to run + // The algorithm guarantees that this would be equal to at least the minimum amount + // and at most one more + maxTestsPerNode := minTestsPerNode + if length%parallelTotal != 0 { + maxTestsPerNode++ + } + + // Number of nodes that will have to run the maximum amount of tests per node + numMaxLoadNodes := length % parallelTotal + + // Number of nodes that precede the current node and will have to run the maximum amount of tests per node + var numPrecedingMaxLoadNodes int + if parallelNode > numMaxLoadNodes { + numPrecedingMaxLoadNodes = numMaxLoadNodes + } else { + numPrecedingMaxLoadNodes = parallelNode - 1 + } + + // Number of nodes that precede the current node and will have to run the minimum amount of tests per node + var numPrecedingMinLoadNodes int + if parallelNode <= numMaxLoadNodes { + numPrecedingMinLoadNodes = 0 + } else { + numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1 + } + + // Evaluate the test start index and number of tests to run + startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode + if parallelNode > numMaxLoadNodes { + count = minTestsPerNode + } else { + count = maxTestsPerNode + } + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go new file mode 100644 index 000000000..99f548bca --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go @@ -0,0 +1,59 @@ +package spec_iterator + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/onsi/ginkgo/internal/spec" +) + +type ParallelIterator struct { + specs []*spec.Spec + host string + client *http.Client +} + +func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator { + return &ParallelIterator{ + specs: specs, + host: host, + client: &http.Client{}, + } +} + +func (s *ParallelIterator) Next() (*spec.Spec, error) { + resp, err := s.client.Get(s.host + "/counter") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode) + } + + var counter Counter + err = json.NewDecoder(resp.Body).Decode(&counter) + if err != nil { + return nil, err + } + + if counter.Index >= len(s.specs) { + return nil, ErrClosed + } + + return s.specs[counter.Index], nil +} + +func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return -1, false +} + +func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + return -1, false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go new file mode 100644 index 000000000..a51c93b8b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go @@ -0,0 +1,45 @@ +package spec_iterator + +import ( + "github.com/onsi/ginkgo/internal/spec" +) + +type SerialIterator struct { + specs []*spec.Spec + index int +} + +func 
NewSerialIterator(specs []*spec.Spec) *SerialIterator { + return &SerialIterator{ + specs: specs, + index: 0, + } +} + +func (s *SerialIterator) Next() (*spec.Spec, error) { + if s.index >= len(s.specs) { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *SerialIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return len(s.specs), true +} + +func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for _, s := range s.specs { + if !s.Skipped() && !s.Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go new file mode 100644 index 000000000..ad4a3ea3c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go @@ -0,0 +1,47 @@ +package spec_iterator + +import "github.com/onsi/ginkgo/internal/spec" + +type ShardedParallelIterator struct { + specs []*spec.Spec + index int + maxIndex int +} + +func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator { + startIndex, count := ParallelizedIndexRange(len(specs), total, node) + + return &ShardedParallelIterator{ + specs: specs, + index: startIndex, + maxIndex: startIndex + count, + } +} + +func (s *ShardedParallelIterator) Next() (*spec.Spec, error) { + if s.index >= s.maxIndex { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return s.maxIndex - s.index, true +} + +func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for i := s.index; i < s.maxIndex; i += 1 { + if !s.specs[i].Skipped() && !s.specs[i].Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go new file mode 100644 index 000000000..74bffad64 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go @@ -0,0 +1,20 @@ +package spec_iterator + +import ( + "errors" + + "github.com/onsi/ginkgo/internal/spec" +) + +var ErrClosed = errors.New("no more specs to run") + +type SpecIterator interface { + Next() (*spec.Spec, error) + NumberOfSpecsPriorToIteration() int + NumberOfSpecsToProcessIfKnown() (int, bool) + NumberOfSpecsThatWillBeRunIfKnown() (int, bool) +} + +type Counter struct { + Index int `json:"index"` +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go new file mode 100644 index 000000000..a0b8b62d5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go @@ -0,0 +1,15 @@ +package specrunner + +import ( + "crypto/rand" + "fmt" +) + +func randomID() string { + b := make([]byte, 8) + _, err := rand.Read(b) + if err != nil { + return "" + } + return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8]) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go new 
file mode 100644 index 000000000..2c683cb8b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go @@ -0,0 +1,411 @@ +package specrunner + +import ( + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + Writer "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + + "time" +) + +type SpecRunner struct { + description string + beforeSuiteNode leafnodes.SuiteNode + iterator spec_iterator.SpecIterator + afterSuiteNode leafnodes.SuiteNode + reporters []reporters.Reporter + startTime time.Time + suiteID string + runningSpec *spec.Spec + writer Writer.WriterInterface + config config.GinkgoConfigType + interrupted bool + processedSpecs []*spec.Spec + lock *sync.Mutex +} + +func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { + return &SpecRunner{ + description: description, + beforeSuiteNode: beforeSuiteNode, + iterator: iterator, + afterSuiteNode: afterSuiteNode, + reporters: reporters, + writer: writer, + config: config, + suiteID: randomID(), + lock: &sync.Mutex{}, + } +} + +func (runner *SpecRunner) Run() bool { + if runner.config.DryRun { + runner.performDryRun() + return true + } + + runner.reportSuiteWillBegin() + signalRegistered := make(chan struct{}) + go runner.registerForInterrupts(signalRegistered) + <-signalRegistered + + suitePassed := runner.runBeforeSuite() + + if suitePassed { + suitePassed = runner.runSpecs() + } + + runner.blockForeverIfInterrupted() + + suitePassed = runner.runAfterSuite() && suitePassed + + runner.reportSuiteDidEnd(suitePassed) + + return suitePassed +} + +func (runner *SpecRunner) performDryRun() { + runner.reportSuiteWillBegin() + + if runner.beforeSuiteNode != nil { + summary := runner.beforeSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportBeforeSuite(summary) + } + + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + + summary := spec.Summary(runner.suiteID) + runner.reportSpecWillRun(summary) + if summary.State == types.SpecStateInvalid { + summary.State = types.SpecStatePassed + } + runner.reportSpecDidComplete(summary, false) + } + + if runner.afterSuiteNode != nil { + summary := runner.afterSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportAfterSuite(summary) + } + + runner.reportSuiteDidEnd(true) +} + +func (runner *SpecRunner) runBeforeSuite() bool { + if runner.beforeSuiteNode == nil || runner.wasInterrupted() { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if !passed { + runner.writer.DumpOut() + } + runner.reportBeforeSuite(runner.beforeSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runAfterSuite() bool { + if runner.afterSuiteNode == nil { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if 
!passed { + runner.writer.DumpOut() + } + runner.reportAfterSuite(runner.afterSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runSpecs() bool { + suiteFailed := false + skipRemainingSpecs := false + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + suiteFailed = true + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + + if runner.wasInterrupted() { + break + } + if skipRemainingSpecs { + spec.Skip() + } + + if !spec.Skipped() && !spec.Pending() { + if passed := runner.runSpec(spec); !passed { + suiteFailed = true + } + } else if spec.Pending() && runner.config.FailOnPending { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + suiteFailed = true + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + } else { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + } + + if spec.Failed() && runner.config.FailFast { + skipRemainingSpecs = true + } + } + + return !suiteFailed +} + +func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) { + maxAttempts := 1 + if runner.config.FlakeAttempts > 0 { + // uninitialized configs count as 1 + maxAttempts = runner.config.FlakeAttempts + } + + for i := 0; i < maxAttempts; i++ { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + runner.runningSpec = spec + spec.Run(runner.writer) + runner.runningSpec = nil + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + if !spec.Failed() { + return true + } + } + return false +} + +func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) { + if runner.runningSpec == nil { + return nil, false + } + + return runner.runningSpec.Summary(runner.suiteID), true +} + +func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + close(signalRegistered) + + <-c + signal.Stop(c) + runner.markInterrupted() + go runner.registerForHardInterrupts() + runner.writer.DumpOutWithHeader(` +Received interrupt. Emitting contents of GinkgoWriter... +--------------------------------------------------------- +`) + if runner.afterSuiteNode != nil { + fmt.Fprint(os.Stderr, ` +--------------------------------------------------------- +Received interrupt. Running AfterSuite... +^C again to terminate immediately +`) + runner.runAfterSuite() + } + runner.reportSuiteDidEnd(false) + os.Exit(1) +} + +func (runner *SpecRunner) registerForHardInterrupts() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + + <-c + fmt.Fprintln(os.Stderr, "\nReceived second interrupt. 
Shutting down.") + os.Exit(1) +} + +func (runner *SpecRunner) blockForeverIfInterrupted() { + runner.lock.Lock() + interrupted := runner.interrupted + runner.lock.Unlock() + + if interrupted { + select {} + } +} + +func (runner *SpecRunner) markInterrupted() { + runner.lock.Lock() + defer runner.lock.Unlock() + runner.interrupted = true +} + +func (runner *SpecRunner) wasInterrupted() bool { + runner.lock.Lock() + defer runner.lock.Unlock() + return runner.interrupted +} + +func (runner *SpecRunner) reportSuiteWillBegin() { + runner.startTime = time.Now() + summary := runner.suiteWillBeginSummary() + for _, reporter := range runner.reporters { + reporter.SpecSuiteWillBegin(runner.config, summary) + } +} + +func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.BeforeSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.AfterSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { + runner.writer.Truncate() + + for _, reporter := range runner.reporters { + reporter.SpecWillRun(summary) + } +} + +func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { + if failed && len(summary.CapturedOutput) == 0 { + summary.CapturedOutput = string(runner.writer.Bytes()) + } + for i := len(runner.reporters) - 1; i >= 1; i-- { + runner.reporters[i].SpecDidComplete(summary) + } + + if failed { + runner.writer.DumpOut() + } + + runner.reporters[0].SpecDidComplete(summary) +} + +func (runner *SpecRunner) reportSuiteDidEnd(success bool) { + summary := runner.suiteDidEndSummary(success) + summary.RunTime = time.Since(runner.startTime) + for _, reporter := range runner.reporters { + reporter.SpecSuiteDidEnd(summary) + } +} + +func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) { + count = 0 + + for _, spec := range runner.processedSpecs { + if filter(spec) { + count++ + } + } + + return count +} + +func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary { + numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return !ex.Skipped() && !ex.Pending() + }) + + numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Pending() + }) + + numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Skipped() + }) + + numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Passed() + }) + + numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Flaked() + }) + + numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Failed() + }) + + if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun { + var known bool + numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration() + } + numberOfFailedSpecs = numberOfSpecsThatWillBeRun + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteSucceeded: success, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: len(runner.processedSpecs), + 
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, + NumberOfPendingSpecs: numberOfPendingSpecs, + NumberOfSkippedSpecs: numberOfSkippedSpecs, + NumberOfPassedSpecs: numberOfPassedSpecs, + NumberOfFailedSpecs: numberOfFailedSpecs, + NumberOfFlakedSpecs: numberOfFlakedSpecs, + } +} + +func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary { + numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown() + if !known { + numTotal = -1 + } + + numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numToRun = -1 + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: numTotal, + NumberOfSpecsThatWillBeRun: numToRun, + NumberOfPendingSpecs: -1, + NumberOfSkippedSpecs: -1, + NumberOfPassedSpecs: -1, + NumberOfFailedSpecs: -1, + NumberOfFlakedSpecs: -1, + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go new file mode 100644 index 000000000..3104bbc88 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -0,0 +1,190 @@ +package suite + +import ( + "math/rand" + "net/http" + "time" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/internal/specrunner" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +type ginkgoTestingT interface { + Fail() +} + +type Suite struct { + topLevelContainer *containernode.ContainerNode + currentContainer *containernode.ContainerNode + containerIndex int + beforeSuiteNode leafnodes.SuiteNode + afterSuiteNode leafnodes.SuiteNode + runner *specrunner.SpecRunner + failer *failer.Failer + running bool +} + +func New(failer *failer.Failer) *Suite { + topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{}) + + return &Suite{ + topLevelContainer: topLevelContainer, + currentContainer: topLevelContainer, + failer: failer, + containerIndex: 1, + } +} + +func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) { + if config.ParallelTotal < 1 { + panic("ginkgo.parallel.total must be >= 1") + } + + if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 { + panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total") + } + + r := rand.New(rand.NewSource(config.RandomSeed)) + suite.topLevelContainer.Shuffle(r) + iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config) + suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config) + + suite.running = true + success := suite.runner.Run() + if !success { + t.Fail() + } + return success, hasProgrammaticFocus +} + +func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) { + specsSlice := []*spec.Spec{} + suite.topLevelContainer.BackPropagateProgrammaticFocus() + for _, collatedNodes := range suite.topLevelContainer.Collate() { + specsSlice = append(specsSlice, 
spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress)) + } + + specs := spec.NewSpecs(specsSlice) + specs.RegexScansFilePath = config.RegexScansFilePath + + if config.RandomizeAllSpecs { + specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) + } + + specs.ApplyFocus(description, config.FocusString, config.SkipString) + + if config.SkipMeasurements { + specs.SkipMeasurements() + } + + var iterator spec_iterator.SpecIterator + + if config.ParallelTotal > 1 { + iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost) + resp, err := http.Get(config.SyncHost + "/has-counter") + if err != nil || resp.StatusCode != http.StatusOK { + iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode) + } + } else { + iterator = spec_iterator.NewSerialIterator(specs.Specs()) + } + + return iterator, specs.HasProgrammaticFocus() +} + +func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { + return suite.runner.CurrentSpecSummary() +} + +func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) { + container := containernode.New(text, flag, codeLocation) + suite.currentContainer.PushContainerNode(container) + + previousContainer := suite.currentContainer + suite.currentContainer = container + suite.containerIndex++ + + body() + + suite.containerIndex-- + suite.currentContainer = previousContainer +} + +func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) { + if suite.running { + suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, 
body, flag, codeLocation, samples, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go new file mode 100644 index 000000000..090445d08 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go @@ -0,0 +1,76 @@ +package testingtproxy + +import ( + "fmt" + "io" +) + +type failFunc func(message string, callerSkip ...int) + +func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy { + return &ginkgoTestingTProxy{ + fail: fail, + offset: offset, + writer: writer, + } +} + +type ginkgoTestingTProxy struct { + fail failFunc + offset int + writer io.Writer +} + +func (t *ginkgoTestingTProxy) Error(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fail() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) FailNow() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Log(args ...interface{}) { + fmt.Fprintln(t.writer, args...) +} + +func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { + t.Log(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) Failed() bool { + return false +} + +func (t *ginkgoTestingTProxy) Parallel() { +} + +func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { + fmt.Println(args...) 
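+	// note: unlike testing.T's Skip, this proxy only prints the message;
+	// it does not stop execution (SkipNow below is a no-op)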
+} + +func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { + t.Skip(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) SkipNow() { +} + +func (t *ginkgoTestingTProxy) Skipped() bool { + return false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go new file mode 100644 index 000000000..6739c3f60 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go @@ -0,0 +1,36 @@ +package writer + +type FakeGinkgoWriter struct { + EventStream []string +} + +func NewFake() *FakeGinkgoWriter { + return &FakeGinkgoWriter{ + EventStream: []string{}, + } +} + +func (writer *FakeGinkgoWriter) AddEvent(event string) { + writer.EventStream = append(writer.EventStream, event) +} + +func (writer *FakeGinkgoWriter) Truncate() { + writer.EventStream = append(writer.EventStream, "TRUNCATE") +} + +func (writer *FakeGinkgoWriter) DumpOut() { + writer.EventStream = append(writer.EventStream, "DUMP") +} + +func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) { + writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header) +} + +func (writer *FakeGinkgoWriter) Bytes() []byte { + writer.EventStream = append(writer.EventStream, "BYTES") + return nil +} + +func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) { + return 0, nil +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go new file mode 100644 index 000000000..98eca3bdd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go @@ -0,0 +1,89 @@ +package writer + +import ( + "bytes" + "io" + "sync" +) + +type WriterInterface interface { + io.Writer + + Truncate() + DumpOut() + DumpOutWithHeader(header string) + Bytes() []byte +} + +type Writer struct { + buffer *bytes.Buffer + outWriter io.Writer + lock *sync.Mutex + stream bool + redirector io.Writer +} + +func New(outWriter io.Writer) *Writer { + return &Writer{ + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + stream: true, + } +} + +func (w *Writer) AndRedirectTo(writer io.Writer) { + w.redirector = writer +} + +func (w *Writer) SetStream(stream bool) { + w.lock.Lock() + defer w.lock.Unlock() + w.stream = stream +} + +func (w *Writer) Write(b []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + n, err = w.buffer.Write(b) + if w.redirector != nil { + w.redirector.Write(b) + } + if w.stream { + return w.outWriter.Write(b) + } + return n, err +} + +func (w *Writer) Truncate() { + w.lock.Lock() + defer w.lock.Unlock() + w.buffer.Reset() +} + +func (w *Writer) DumpOut() { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream { + w.buffer.WriteTo(w.outWriter) + } +} + +func (w *Writer) Bytes() []byte { + w.lock.Lock() + defer w.lock.Unlock() + b := w.buffer.Bytes() + copied := make([]byte, len(b)) + copy(copied, b) + return copied +} + +func (w *Writer) DumpOutWithHeader(header string) { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream && w.buffer.Len() > 0 { + w.outWriter.Write([]byte(header)) + w.buffer.WriteTo(w.outWriter) + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go new file mode 100644 index 000000000..ac58dd5f7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -0,0 +1,84 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available 
to tweak Ginkgo's default output. + +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type DefaultReporter struct { + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + specSummaries []*types.SpecSummary +} + +func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter { + return &DefaultReporter{ + config: config, + stenographer: stenographer, + } +} + +func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) + if config.ParallelTotal > 1 { + reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) + } +} + +func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) { + if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + reporter.stenographer.AnnounceSpecWillRun(specSummary) + } +} + +func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) { + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct) + } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold { + reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceSuccesfulSpec(specSummary) + } + case types.SpecStatePending: + reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) + case types.SpecStateSkipped: + reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace) + case types.SpecStateTimedOut: + reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStatePanicked: + reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStateFailed: + reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + } + + reporter.specSummaries = append(reporter.specSummaries, specSummary) +} + +func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.stenographer.SummarizeFailures(reporter.specSummaries) + 
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go new file mode 100644 index 000000000..27db47949 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go @@ -0,0 +1,59 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +//FakeReporter is useful for testing purposes +type FakeReporter struct { + Config config.GinkgoConfigType + + BeginSummary *types.SuiteSummary + BeforeSuiteSummary *types.SetupSummary + SpecWillRunSummaries []*types.SpecSummary + SpecSummaries []*types.SpecSummary + AfterSuiteSummary *types.SetupSummary + EndSummary *types.SuiteSummary + + SpecWillRunStub func(specSummary *types.SpecSummary) + SpecDidCompleteStub func(specSummary *types.SpecSummary) +} + +func NewFakeReporter() *FakeReporter { + return &FakeReporter{ + SpecWillRunSummaries: make([]*types.SpecSummary, 0), + SpecSummaries: make([]*types.SpecSummary, 0), + } +} + +func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + fakeR.Config = config + fakeR.BeginSummary = summary +} + +func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.BeforeSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) { + if fakeR.SpecWillRunStub != nil { + fakeR.SpecWillRunStub(specSummary) + } + fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary) +} + +func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) { + if fakeR.SpecDidCompleteStub != nil { + fakeR.SpecDidCompleteStub(specSummary) + } + fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary) +} + +func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.AfterSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + fakeR.EndSummary = summary +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go new file mode 100644 index 000000000..2c9f3c792 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -0,0 +1,152 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "math" + "os" + "strings" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type JUnitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + TestCases []JUnitTestCase `xml:"testcase"` + Name string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Failures int `xml:"failures,attr"` + Errors int `xml:"errors,attr"` + Time float64 `xml:"time,attr"` +} + +type JUnitTestCase struct { + Name string `xml:"name,attr"` + ClassName string `xml:"classname,attr"` + FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + Time float64 `xml:"time,attr"` + SystemOut string `xml:"system-out,omitempty"` +} + +type JUnitFailureMessage struct { + Type string `xml:"type,attr"` + Message string `xml:",chardata"` +} + +type JUnitSkipped struct { + XMLName xml.Name `xml:"skipped"` +} + +type JUnitReporter struct { + suite JUnitTestSuite + filename string + testSuiteName string +} + +//NewJUnitReporter creates a new 
JUnit XML reporter. The XML will be stored in the passed in filename. +func NewJUnitReporter(filename string) *JUnitReporter { + return &JUnitReporter{ + filename: filename, + } +} + +func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.suite = JUnitTestSuite{ + Name: summary.SuiteDescription, + TestCases: []JUnitTestCase{}, + } + reporter.testSuiteName = summary.SuiteDescription +} + +func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { +} + +func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func failureMessage(failure types.SpecFailure) string { + return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String()) +} + +func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testCase := JUnitTestCase{ + Name: name, + ClassName: reporter.testSuiteName, + } + + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(setupSummary.State), + Message: failureMessage(setupSummary.Failure), + } + testCase.SystemOut = setupSummary.CapturedOutput + testCase.Time = setupSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) + } +} + +func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testCase := JUnitTestCase{ + Name: strings.Join(specSummary.ComponentTexts[1:], " "), + ClassName: reporter.testSuiteName, + } + if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(specSummary.State), + Message: failureMessage(specSummary.Failure), + } + testCase.SystemOut = specSummary.CapturedOutput + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + testCase.Skipped = &JUnitSkipped{} + } + testCase.Time = specSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) +} + +func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun + reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000 + reporter.suite.Failures = summary.NumberOfFailedSpecs + reporter.suite.Errors = 0 + file, err := os.Create(reporter.filename) + if err != nil { + fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error()) + } + defer file.Close() + file.WriteString(xml.Header) + encoder := xml.NewEncoder(file) + encoder.Indent(" ", " ") + err = encoder.Encode(reporter.suite) + if err != nil { + fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error()) + } +} + +func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string { + switch state { + case types.SpecStateFailed: + return "Failure" + case types.SpecStateTimedOut: + return "Timeout" + case types.SpecStatePanicked: + return "Panic" + default: + return "" + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go new file mode 
100644 index 000000000..348b9dfce --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/reporter.go @@ -0,0 +1,15 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type Reporter interface { + SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SpecSuiteDidEnd(summary *types.SuiteSummary) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go new file mode 100644 index 000000000..45b8f8869 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go @@ -0,0 +1,64 @@ +package stenographer + +import ( + "fmt" + "strings" +) + +func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string { + var out string + + if len(args) > 0 { + out = fmt.Sprintf(format, args...) + } else { + out = format + } + + if s.color { + return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle) + } else { + return out + } +} + +func (s *consoleStenographer) printBanner(text string, bannerCharacter string) { + fmt.Fprintln(s.w, text) + fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text))) +} + +func (s *consoleStenographer) printNewLine() { + fmt.Fprintln(s.w, "") +} + +func (s *consoleStenographer) printDelimiter() { + fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30))) +} + +func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) { + fmt.Fprint(s.w, s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) { + fmt.Fprintln(s.w, s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string { + var text string + + if len(args) > 0 { + text = fmt.Sprintf(format, args...) 
+ } else { + text = format + } + + stringArray := strings.Split(text, "\n") + padding := "" + if indentation >= 0 { + padding = strings.Repeat(" ", indentation) + } + for i, s := range stringArray { + stringArray[i] = fmt.Sprintf("%s%s", padding, s) + } + + return strings.Join(stringArray, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go new file mode 100644 index 000000000..98854e7d9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go @@ -0,0 +1,142 @@ +package stenographer + +import ( + "sync" + + "github.com/onsi/ginkgo/types" +) + +func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall { + return FakeStenographerCall{ + Method: method, + Args: args, + } +} + +type FakeStenographer struct { + calls []FakeStenographerCall + lock *sync.Mutex +} + +type FakeStenographerCall struct { + Method string + Args []interface{} +} + +func NewFakeStenographer() *FakeStenographer { + stenographer := &FakeStenographer{ + lock: &sync.Mutex{}, + } + stenographer.Reset() + return stenographer +} + +func (stenographer *FakeStenographer) Calls() []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + return stenographer.calls +} + +func (stenographer *FakeStenographer) Reset() { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = make([]FakeStenographerCall, 0) +} + +func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + results := make([]FakeStenographerCall, 0) + for _, call := range stenographer.calls { + if call.Method == method { + results = append(results, call) + } + } + + return results +} + +func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...)) +} + +func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct) +} + +func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) +} + +func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { + stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct) +} + +func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) +} + +func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSpecWillRun", spec) +} + +func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + 
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace) +} +func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) { + stenographer.registerCall("AnnounceCapturedOutput", output) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSuccesfulSpec", spec) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + stenographer.registerCall("AnnouncePendingSpec", spec, noisy) +} + +func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + stenographer.registerCall("SummarizeFailures", summaries) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go new file mode 100644 index 000000000..601c74d66 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go @@ -0,0 +1,572 @@ +/* +The stenographer is used by Ginkgo's reporters to generate output. + +Move along, nothing to see here. 
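+
+Stenographer is implemented by consoleStenographer for real console output, and by FakeStenographer (fake_stenographer.go), which records each call so reporter tests can assert on what was announced.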
+*/ + +package stenographer + +import ( + "fmt" + "io" + "runtime" + "strings" + + "github.com/onsi/ginkgo/types" +) + +const defaultStyle = "\x1b[0m" +const boldStyle = "\x1b[1m" +const redColor = "\x1b[91m" +const greenColor = "\x1b[32m" +const yellowColor = "\x1b[33m" +const cyanColor = "\x1b[36m" +const grayColor = "\x1b[90m" +const lightGrayColor = "\x1b[37m" + +type cursorStateType int + +const ( + cursorStateTop cursorStateType = iota + cursorStateStreaming + cursorStateMidBlock + cursorStateEndBlock +) + +type Stenographer interface { + AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) + AnnounceAggregatedParallelRun(nodes int, succinct bool) + AnnounceParallelRun(node int, nodes int, succinct bool) + AnnounceTotalNumberOfSpecs(total int, succinct bool) + AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) + AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) + + AnnounceSpecWillRun(spec *types.SpecSummary) + AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + + AnnounceCapturedOutput(output string) + + AnnounceSuccesfulSpec(spec *types.SpecSummary) + AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) + AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) + + AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) + AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) + + AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) + + SummarizeFailures(summaries []*types.SpecSummary) +} + +func New(color bool, enableFlakes bool, writer io.Writer) Stenographer { + denoter := "•" + if runtime.GOOS == "windows" { + denoter = "+" + } + return &consoleStenographer{ + color: color, + denoter: denoter, + cursorState: cursorStateTop, + enableFlakes: enableFlakes, + w: writer, + } +} + +type consoleStenographer struct { + color bool + denoter string + cursorState cursorStateType + enableFlakes bool + w io.Writer +} + +var alternatingColors = []string{defaultStyle, grayColor} + +func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + if succinct { + s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description)) + return + } + s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=") + s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed)) + if randomizingAll { + s.print(0, " - Will randomize all specs") + } + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { + if succinct { + s.print(0, "- node #%d ", node) + return + } + s.println(0, + "Parallel test node %s/%s.", + s.colorize(boldStyle, "%d", node), + s.colorize(boldStyle, "%d", nodes), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + if succinct { + s.print(0, "- %d nodes ", nodes) + return + } + s.println(0, + "Running in parallel across %s nodes", + s.colorize(boldStyle, "%d", nodes), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + if succinct { + s.print(0, "- %d/%d specs ", specsToRun, total) + s.stream() + return + } + 
s.println(0, + "Will run %s of %s specs", + s.colorize(boldStyle, "%d", specsToRun), + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + if succinct { + s.print(0, "- %d specs ", total) + s.stream() + return + } + s.println(0, + "Will run %s specs", + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + if succinct && summary.SuiteSucceeded { + s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) + return + } + s.printNewLine() + color := greenColor + if !summary.SuiteSucceeded { + color = redColor + } + s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds())) + + status := "" + if summary.SuiteSucceeded { + status = s.colorize(boldStyle+greenColor, "SUCCESS!") + } else { + status = s.colorize(boldStyle+redColor, "FAIL!") + } + + flakes := "" + if s.enableFlakes { + flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs) + } + + s.print(0, + "%s -- %s | %s | %s | %s\n", + status, + s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs), + s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes, + s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs), + s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs), + ) +} + +func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + s.startBlock() + for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] { + s.print(0, s.colorize(alternatingColors[i%2], text)+" ") + } + + indentation := 0 + if len(spec.ComponentTexts) > 2 { + indentation = 1 + s.printNewLine() + } + index := len(spec.ComponentTexts) - 1 + s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index])) + s.printNewLine() + s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String())) + s.printNewLine() + s.midBlock() +} + +func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.startBlock() + var message string + switch summary.State { + case types.SpecStateFailed: + message = "Failure" + case types.SpecStatePanicked: + message = "Panic" + case types.SpecStateTimedOut: + message = "Timeout" + } + + s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true) + + s.printNewLine() + s.printFailure(indentation, summary.State, summary.Failure, fullTrace) + + s.endBlock() +} + +func (s *consoleStenographer) AnnounceCapturedOutput(output string) { + if output == "" { + return + } + + s.startBlock() + s.println(0, output) + s.midBlock() +} + +func (s *consoleStenographer) 
AnnounceSuccesfulSpec(spec *types.SpecSummary) { + s.print(0, s.colorize(greenColor, s.denoter)) + s.stream() +} + +func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()), + "", + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter), + s.measurementReport(spec, succinct), + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + if noisy { + s.printBlockWithMessage( + s.colorize(yellowColor, "P [PENDING]"), + "", + spec, + false, + ) + } else { + s.print(0, s.colorize(yellowColor, "P")) + s.stream() + } +} + +func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + // Skips at runtime will have a non-empty spec.Failure. All others should be succinct. + if succinct || spec.Failure == (types.SpecFailure{}) { + s.print(0, s.colorize(cyanColor, "S")) + s.stream() + } else { + s.startBlock() + s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printSkip(indentation, spec.Failure) + s.endBlock() + } +} + +func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + failingSpecs := []*types.SpecSummary{} + + for _, summary := range summaries { + if summary.HasFailureState() { + failingSpecs = append(failingSpecs, summary) + } + } + + if len(failingSpecs) == 0 { + return + } + + s.printNewLine() + s.printNewLine() + plural := "s" + if len(failingSpecs) == 1 { + plural = "" + } + s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural)) + for _, summary := range failingSpecs { + s.printNewLine() + if summary.HasFailureState() { + if summary.TimedOut() { + s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] ")) + } else if summary.Panicked() { + s.print(0, s.colorize(redColor+boldStyle, "[Panic!] 
")) + } else if summary.Failed() { + s.print(0, s.colorize(redColor+boldStyle, "[Fail] ")) + } + s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true) + s.printNewLine() + s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String())) + } + } +} + +func (s *consoleStenographer) startBlock() { + if s.cursorState == cursorStateStreaming { + s.printNewLine() + s.printDelimiter() + } else if s.cursorState == cursorStateMidBlock { + s.printNewLine() + } +} + +func (s *consoleStenographer) midBlock() { + s.cursorState = cursorStateMidBlock +} + +func (s *consoleStenographer) endBlock() { + s.printDelimiter() + s.cursorState = cursorStateEndBlock +} + +func (s *consoleStenographer) stream() { + s.cursorState = cursorStateStreaming +} + +func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) { + s.startBlock() + s.println(0, header) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct) + + if message != "" { + s.printNewLine() + s.println(indentation, message) + } + + s.endBlock() +} + +func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.startBlock() + s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printFailure(indentation, spec.State, spec.Failure, fullTrace) + s.endBlock() +} + +func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string { + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + return " in Suite Setup (BeforeSuite)" + case types.SpecComponentTypeAfterSuite: + return " in Suite Teardown (AfterSuite)" + case types.SpecComponentTypeBeforeEach: + return " in Spec Setup (BeforeEach)" + case types.SpecComponentTypeJustBeforeEach: + return " in Spec Setup (JustBeforeEach)" + case types.SpecComponentTypeAfterEach: + return " in Spec Teardown (AfterEach)" + } + + return "" +} + +func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) { + s.println(indentation, s.colorize(cyanColor, spec.Message)) + s.printNewLine() + s.println(indentation, spec.Location.String()) +} + +func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) { + if state == types.SpecStatePanicked { + s.println(indentation, s.colorize(redColor+boldStyle, failure.Message)) + s.println(indentation, s.colorize(redColor, failure.ForwardedPanic)) + s.println(indentation, failure.Location.String()) + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } else { + s.println(indentation, s.colorize(redColor, failure.Message)) + s.printNewLine() + s.println(indentation, failure.Location.String()) + if fullTrace { + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } + } +} + +func (s *consoleStenographer) printSpecContext(componentTexts []string, 
componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + startIndex := 1 + indentation := 0 + + if len(componentTexts) == 1 { + startIndex = 0 + } + + for i := startIndex; i < len(componentTexts); i++ { + if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex { + color := redColor + if state == types.SpecStateSkipped { + color = cyanColor + } + blockType := "" + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + blockType = "BeforeSuite" + case types.SpecComponentTypeAfterSuite: + blockType = "AfterSuite" + case types.SpecComponentTypeBeforeEach: + blockType = "BeforeEach" + case types.SpecComponentTypeJustBeforeEach: + blockType = "JustBeforeEach" + case types.SpecComponentTypeAfterEach: + blockType = "AfterEach" + case types.SpecComponentTypeIt: + blockType = "It" + case types.SpecComponentTypeMeasure: + blockType = "Measurement" + } + if succinct { + s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i])) + } else { + s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType)) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } else { + if succinct { + s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i])) + } else { + s.println(indentation, componentTexts[i]) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } + indentation++ + } + + return indentation +} + +func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct) + + if succinct { + if len(componentTexts) > 0 { + s.printNewLine() + s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1])) + } + s.printNewLine() + indentation = 1 + } else { + indentation-- + } + + return indentation +} + +func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string { + orderedKeys := make([]string, len(measurements)) + for key, measurement := range measurements { + orderedKeys[measurement.Order] = key + } + return orderedKeys +} + +func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string { + if len(spec.Measurements) == 0 { + return "Found no measurements" + } + + message := []string{} + orderedKeys := s.orderedMeasurementKeys(spec.Measurements) + + if succinct { + message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) + for _, key := range orderedKeys { + measurement := spec.Measurements[key] + message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s", + s.colorize(boldStyle, "%s", measurement.Name), + measurement.SmallestLabel, + s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), + measurement.Units, + measurement.AverageLabel, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), + measurement.Units, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), + measurement.Units, + measurement.LargestLabel, + s.colorize(redColor, measurement.PrecisionFmt(), 
measurement.Largest),
+					measurement.Units,
+				))
+			}
+		} else {
+			message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
+			for _, key := range orderedKeys {
+				measurement := spec.Measurements[key]
+				info := ""
+				if measurement.Info != nil {
+					message = append(message, fmt.Sprintf("%v", measurement.Info))
+				}
+
+				message = append(message, fmt.Sprintf("%s:\n%s  %s: %s%s\n  %s: %s%s\n  %s: %s%s ± %s%s",
+					s.colorize(boldStyle, "%s", measurement.Name),
+					info,
+					measurement.SmallestLabel,
+					s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
+					measurement.Units,
+					measurement.LargestLabel,
+					s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
+					measurement.Units,
+					measurement.AverageLabel,
+					s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
+					measurement.Units,
+					s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
+					measurement.Units,
+				))
+			}
+		}
+
+	return strings.Join(message, "\n")
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
new file mode 100644
index 000000000..91b5cef30
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
new file mode 100644
index 000000000..e84226a73
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
@@ -0,0 +1,43 @@
+# go-colorable
+
+Colorable writer for Windows.
+
+For example, most logger packages don't show colors on Windows. (This can be worked around with ansicon, but that requires an external tool.) This package makes it possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+ +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go new file mode 100644 index 000000000..52d6653b3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go @@ -0,0 +1,24 @@ +// +build !windows + +package colorable + +import ( + "io" + "os" +) + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +func NewColorableStdout() io.Writer { + return os.Stdout +} + +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go new file mode 100644 index 000000000..108800923 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go @@ -0,0 +1,783 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") +) + +type Writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return 
file + } +} + +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, 
+	204: 0xff5f87,
+	205: 0xff5faf,
+	206: 0xff5fd7,
+	207: 0xff5fff,
+	208: 0xff8700,
+	209: 0xff875f,
+	210: 0xff8787,
+	211: 0xff87af,
+	212: 0xff87d7,
+	213: 0xff87ff,
+	214: 0xffaf00,
+	215: 0xffaf5f,
+	216: 0xffaf87,
+	217: 0xffafaf,
+	218: 0xffafd7,
+	219: 0xffafff,
+	220: 0xffd700,
+	221: 0xffd75f,
+	222: 0xffd787,
+	223: 0xffd7af,
+	224: 0xffd7d7,
+	225: 0xffd7ff,
+	226: 0xffff00,
+	227: 0xffff5f,
+	228: 0xffff87,
+	229: 0xffffaf,
+	230: 0xffffd7,
+	231: 0xffffff,
+	232: 0x080808,
+	233: 0x121212,
+	234: 0x1c1c1c,
+	235: 0x262626,
+	236: 0x303030,
+	237: 0x3a3a3a,
+	238: 0x444444,
+	239: 0x4e4e4e,
+	240: 0x585858,
+	241: 0x626262,
+	242: 0x6c6c6c,
+	243: 0x767676,
+	244: 0x808080,
+	245: 0x8a8a8a,
+	246: 0x949494,
+	247: 0x9e9e9e,
+	248: 0xa8a8a8,
+	249: 0xb2b2b2,
+	250: 0xbcbcbc,
+	251: 0xc6c6c6,
+	252: 0xd0d0d0,
+	253: 0xdadada,
+	254: 0xe4e4e4,
+	255: 0xeeeeee,
+}
+
+func (w *Writer) Write(data []byte) (n int, err error) {
+	var csbi consoleScreenBufferInfo
+	procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+
+	er := bytes.NewBuffer(data)
+loop:
+	for {
+		r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+		if r1 == 0 {
+			break loop
+		}
+
+		c1, _, err := er.ReadRune()
+		if err != nil {
+			break loop
+		}
+		if c1 != 0x1b {
+			fmt.Fprint(w.out, string(c1))
+			continue
+		}
+		c2, _, err := er.ReadRune()
+		if err != nil {
+			w.lastbuf.WriteRune(c1)
+			break loop
+		}
+		if c2 != 0x5b {
+			w.lastbuf.WriteRune(c1)
+			w.lastbuf.WriteRune(c2)
+			continue
+		}
+
+		var buf bytes.Buffer
+		var m rune
+		for {
+			c, _, err := er.ReadRune()
+			if err != nil {
+				w.lastbuf.WriteRune(c1)
+				w.lastbuf.WriteRune(c2)
+				w.lastbuf.Write(buf.Bytes())
+				break loop
+			}
+			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+				m = c
+				break
+			}
+			buf.Write([]byte(string(c)))
+		}
+
+		var csbi consoleScreenBufferInfo
+		switch m {
+		case 'A': // cursor up
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'B': // cursor down
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'C': // cursor forward (CUF moves right, so x increases)
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'D': // cursor back (CUB moves left, so x decreases)
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'E': // cursor next line
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'F': // cursor previous line
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'G': // cursor horizontal absolute
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'H': // cursor position: row n1, column n2
+			token := strings.Split(buf.String(), ";")
+			if len(token) != 2 {
+				continue
+			}
+			n1, err := strconv.Atoi(token[0])
+			if err != nil {
+				continue
+			}
+			n2, err := strconv.Atoi(token[1])
+			if err != nil {
+				continue
+			}
+			csbi.cursorPosition.x = short(n2)
+			csbi.cursorPosition.y = short(n1)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'J': // erase in display
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'K': // erase in line
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'm': // select graphic rendition (colors and text attributes)
+			attr := csbi.attributes
+			cs := buf.String()
+			if cs == "" {
+				procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+				continue
+			}
+			token := strings.Split(cs, ";")
+			for i := 0; i < len(token); i += 1 {
+				ns := token[i]
+				if n, err = strconv.Atoi(ns); err == nil {
+					switch {
+					case n == 0 || n == 100:
+						attr = w.oldattr
+					case 1 <= n && n <= 5:
+						attr |= foregroundIntensity
+					case n == 7:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case n == 22 || n == 25:
+						attr |= foregroundIntensity
+					case n == 27:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case 30 <= n && n <= 37:
+						attr = (attr & backgroundMask)
+						if (n-30)&1 != 0 {
+							attr |= foregroundRed
+						}
+						if (n-30)&2 != 0 {
+							attr
|= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := 
float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go new file mode 100644 index 000000000..fb976dbd8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" +) + +type NonColorable struct { + out io.Writer + lastbuf bytes.Buffer +} + +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewBuffer(data) +loop: + for { + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + return len(data) - w.lastbuf.Len(), nil +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above 
copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
new file mode 100644
index 000000000..74845de4a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
@@ -0,0 +1,37 @@
+# go-isatty
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mattn/go-isatty"
+	"os"
+)
+
+func main() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
new file mode 100644
index 000000000..17d4f90eb
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty reports whether a file descriptor refers to a terminal.
+package isatty
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
new file mode 100644
index 000000000..83c588773
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
@@ -0,0 +1,9 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal, which
+// is always false on App Engine classic, a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
new file mode 100644
index 000000000..98ffe86a4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal returns true if the file descriptor is a terminal.
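+// It does this by issuing a TIOCGETA ioctl against the descriptor; the call
+// succeeds only when the descriptor refers to a terminal device.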
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
new file mode 100644
index 000000000..9d24bac1d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
new file mode 100644
index 000000000..1f0c6bf53
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+	var termio unix.Termio
+	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+	return err == nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
new file mode 100644
index 000000000..83c398b16
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
@@ -0,0 +1,19 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+
+// IsTerminal returns true if the file descriptor is a terminal.
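+// On Windows a handle is treated as a terminal when GetConsoleMode succeeds
+// for it; for example (an illustrative call, assuming this package is
+// imported as isatty):
+//
+//	isatty.IsTerminal(os.Stdout.Fd()) // true in a console, false when redirected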
+func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go new file mode 100644 index 000000000..36ee2a600 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go @@ -0,0 +1,93 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +const ( + messageId = "##teamcity" +) + +type TeamCityReporter struct { + writer io.Writer + testSuiteName string +} + +func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { + return &TeamCityReporter{ + writer: writer, + } +} + +func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.testSuiteName = escape(summary.SuiteDescription) + fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName) +} + +func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testName := escape(name) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) + message := escape(setupSummary.Failure.ComponentCodeLocation.String()) + details := escape(setupSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) + } +} + +func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) +} + +func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + + if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + message := escape(specSummary.Failure.ComponentCodeLocation.String()) + details := escape(specSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName) + } + + durationInMilliseconds := specSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) +} + +func (reporter 
*TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+	fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName)
+}
+
+func escape(output string) string {
+	output = strings.Replace(output, "|", "||", -1)
+	output = strings.Replace(output, "'", "|'", -1)
+	output = strings.Replace(output, "\n", "|n", -1)
+	output = strings.Replace(output, "\r", "|r", -1)
+	output = strings.Replace(output, "[", "|[", -1)
+	output = strings.Replace(output, "]", "|]", -1)
+	return output
+}
diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go
new file mode 100644
index 000000000..935a89e13
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/types/code_location.go
@@ -0,0 +1,15 @@
+package types
+
+import (
+	"fmt"
+)
+
+type CodeLocation struct {
+	FileName       string
+	LineNumber     int
+	FullStackTrace string
+}
+
+func (codeLocation CodeLocation) String() string {
+	return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
+}
diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go
new file mode 100644
index 000000000..fdd6ed5bd
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/types/synchronization.go
@@ -0,0 +1,30 @@
+package types
+
+import (
+	"encoding/json"
+)
+
+type RemoteBeforeSuiteState int
+
+const (
+	RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota
+
+	RemoteBeforeSuiteStatePending
+	RemoteBeforeSuiteStatePassed
+	RemoteBeforeSuiteStateFailed
+	RemoteBeforeSuiteStateDisappeared
+)
+
+type RemoteBeforeSuiteData struct {
+	Data  []byte
+	State RemoteBeforeSuiteState
+}
+
+func (r RemoteBeforeSuiteData) ToJSON() []byte {
+	data, _ := json.Marshal(r)
+	return data
+}
+
+type RemoteAfterSuiteData struct {
+	CanRun bool
+}
diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go
new file mode 100644
index 000000000..0e89521be
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/types/types.go
@@ -0,0 +1,174 @@
+package types
+
+import (
+	"strconv"
+	"time"
+)
+
+const GINKGO_FOCUS_EXIT_CODE = 197
+
+/*
+SuiteSummary represents a summary of the test suite and is passed to both
+Reporter.SpecSuiteWillBegin and
+Reporter.SpecSuiteDidEnd.
+
+This is unfortunate, as these two methods should receive different objects: when running in parallel,
+each node does not deterministically know how many specs it will end up running.
+
+Unfortunately, making such a change would break backward compatibility.
+
+Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields
+with -1.
+*/
+type SuiteSummary struct {
+	SuiteDescription string
+	SuiteSucceeded   bool
+	SuiteID          string
+
+	NumberOfSpecsBeforeParallelization int
+	NumberOfTotalSpecs                 int
+	NumberOfSpecsThatWillBeRun         int
+	NumberOfPendingSpecs               int
+	NumberOfSkippedSpecs               int
+	NumberOfPassedSpecs                int
+	NumberOfFailedSpecs                int
+	// Flaked specs are those that failed initially, but then passed on a
+	// subsequent try.
+ NumberOfFlakedSpecs int + RunTime time.Duration +} + +type SpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*SpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s SpecSummary) HasFailureState() bool { + return s.State.IsFailure() +} + +func (s SpecSummary) TimedOut() bool { + return s.State == SpecStateTimedOut +} + +func (s SpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s SpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s SpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s SpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s SpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type SetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type SpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type SpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s SpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} + +type SpecState uint + +const ( + SpecStateInvalid SpecState = iota + + SpecStatePending + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStatePanicked + SpecStateTimedOut +) + +func (state SpecState) IsFailure() bool { + return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed +} + +type SpecComponentType uint + +const ( + SpecComponentTypeInvalid SpecComponentType = iota + + SpecComponentTypeContainer + SpecComponentTypeBeforeSuite + SpecComponentTypeAfterSuite + SpecComponentTypeBeforeEach + SpecComponentTypeJustBeforeEach + SpecComponentTypeJustAfterEach + SpecComponentTypeAfterEach + SpecComponentTypeIt + SpecComponentTypeMeasure +) + +type FlagType uint + +const ( + FlagTypeNone FlagType = iota + FlagTypeFocused + FlagTypePending +) diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore new file mode 100644 index 000000000..720c13cba --- /dev/null +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +*.test +. +.idea +gomega.iml diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml new file mode 100644 index 000000000..2420a5d07 --- /dev/null +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - 1.10.x + - 1.11.x + - 1.12.x + +env: + - GO111MODULE=on + +install: + - go get -v ./... + - go build ./... 
+  - go get github.com/onsi/ginkgo
+  - go install github.com/onsi/ginkgo/ginkgo
+
+script: make test
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
new file mode 100644
index 000000000..5d1eda837
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -0,0 +1,136 @@
+## 1.5.0
+
+### Features
+
+- Added MatchKeys matchers [8b909fc]
+
+### Fixes and Minor Improvements
+
+- Add type aliases to remove stuttering [03b0461]
+- Don't run session_test.go on windows (#324) [5533ce8]
+
+## 1.4.3
+
+### Fixes:
+
+- ensure file name and line numbers are correctly reported for XUnit [6fff58f]
+- Fixed matcher for content-type (#305) [69d9b43]
+
+## 1.4.2
+
+### Fixes:
+
+- Add go.mod and go.sum files to define the gomega go module [f3de367, a085d30]
+- Work around go vet issue with Go v1.11 (#300) [40dd6ad]
+- Better output when using with go XUnit-style tests, fixes #255 (#297) [29a4b97]
+- Fix MatchJSON fail to parse json.RawMessage (#298) [ae19f1b]
+- show threshold in failure message of BeNumericallyMatcher (#293) [4bbecc8]
+
+## 1.4.1
+
+### Fixes:
+
+- Update documentation formatting and examples (#289) [9be8410]
+- allow 'Receive' matcher to be used with concrete types (#286) [41673fd]
+- Fix data race in ghttp server (#283) [7ac6b01]
+- Travis badge should only show master [cc102ab]
+
+## 1.4.0
+
+### Features
+- Make string pretty diff user configurable (#273) [eb112ce, 649b44d]
+
+### Fixes
+- Use httputil.DumpRequest to pretty-print unhandled requests (#278) [a4ff0fc, b7d1a52]
+- fix typo floa32 > float32 (#272) [041ae3b, 6e33911]
+- Fix link to documentation on adding your own matchers (#270) [bb2c830, fcebc62]
+- Use setters and getters to avoid race condition (#262) [13057c3, a9c79f1]
+- Avoid sending a signal if the process is not alive (#259) [b8043e5, 4fc1762]
+- Improve message from AssignableToTypeOf when expected value is nil (#281) [9c1fb20]
+
+## 1.3.0
+
+Improvements:
+
+- The `Equal` matcher matches byte slices more performantly.
+- Improved how `MatchError` matches error strings.
+- `MatchXML` ignores the order of xml node attributes.
+- Improve support for XUnit style golang tests. ([#254](https://github.com/onsi/gomega/issues/254))
+
+Bug Fixes:
+
+- Diff generation now handles multi-byte sequences correctly.
+- Multiple goroutines can now call `gexec.Build` concurrently.
+
+## 1.2.0
+
+Improvements:
+
+- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
+- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
+- `Receive()` no longer errors when passed a closed channel; it's perfectly fine to attempt to read from a closed channel, so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
+- Added `HavePrefix` and `HaveSuffix` matchers.
+- `ghttp` can now handle concurrent requests.
+- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
+- Improved `ghttp`'s behavior around failing assertions and panics:
+    - If a registered handler makes a failing assertion `ghttp` will return `500`.
+    - If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break. This code is almost certainly incorrect and creating a false positive.
+- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
+- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
+- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers
+- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the allotted time.
+- Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer.
+
+Bug Fixes:
+- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
+- `ContainElement` no longer bails if a passed-in matcher errors.
+
+## 1.0 (8/2/2014)
+
+No changes. Dropping "beta" from the version number.
+
+## 1.0.0-beta (7/8/2014)
+Breaking Changes:
+
+- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
+- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher
+
+New Test-Support Features:
+
+- `ghttp`: supports testing http clients
+    - Provides a flexible fake http server
+    - Provides a collection of chainable http handlers that perform assertions.
+- `gbytes`: supports making ordered assertions against streams of data
+    - Provides a `gbytes.Buffer`
+    - Provides a `Say` matcher to perform ordered assertions against output data
+- `gexec`: supports testing external processes
+    - Provides support for building Go binaries
+    - Wraps and starts `exec.Cmd` commands
+    - Makes it easy to assert against stdout and stderr
+    - Makes it easy to send signals and wait for processes to exit
+    - Provides an `Exit` matcher to assert against exit code.
+
+DSL Changes:
+
+- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs.
+- The default timeouts for `Eventually` and `Consistently` are now configurable.
+
+New Matchers:
+
+- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map.
+- `BeTemporally`: like `BeNumerically` but for `time.Time`
+- `HaveKeyWithValue`: asserts a map has a given key with the given value.
+
+Updated Matchers:
+
+- The `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
+- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Gomega's internal to `internal`
diff --git a/vendor/github.com/onsi/gomega/CONTRIBUTING.md b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
new file mode 100644
index 000000000..0d7a09928
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing to Gomega
+
+Your contributions to Gomega are essential for its long-term maintenance and improvement.
To make a contribution:
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+    - Make sure to add appropriate unit tests
+    - Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR
+    - Please run the following linter locally (`go vet ./...`) and make sure its output does not contain any warnings
+- Update the documentation. In addition to standard `godoc` comments, Gomega has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR.
+
+If you're a committer, check out RELEASING.md to learn how to cut a release.
+
+Thanks for supporting Gomega!
diff --git a/vendor/github.com/onsi/gomega/LICENSE b/vendor/github.com/onsi/gomega/LICENSE
new file mode 100644
index 000000000..9415ee72c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/gomega/Makefile b/vendor/github.com/onsi/gomega/Makefile
new file mode 100644
index 000000000..c92cd56e3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/Makefile
@@ -0,0 +1,6 @@
+test:
+	[ -z "`gofmt -s -w -l -e .`" ]
+	go vet
+	ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
+
+.PHONY: test
diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md
new file mode 100644
index 000000000..76aa6b558
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/README.md
@@ -0,0 +1,21 @@
+![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png)
+
+[![Build Status](https://travis-ci.org/onsi/gomega.svg?branch=master)](https://travis-ci.org/onsi/gomega)
+
+Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers).
+
+If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
+
+## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang
+
+Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/)
+
+## Community Matchers
+
+A collection of community matchers is available on the [wiki](https://github.com/onsi/gomega/wiki).
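+
+A minimal usage sketch with Go's standard `testing` package (the test below is illustrative, not part of Gomega; it assumes `gomega` is imported):
+
+```go
+func TestAnswer(t *testing.T) {
+	g := gomega.NewWithT(t)
+	g.Expect(21 * 2).To(gomega.Equal(42))
+}
+```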
+ +## License + +Gomega is MIT-Licensed + +The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license. diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md new file mode 100644 index 000000000..998d64ee7 --- /dev/null +++ b/vendor/github.com/onsi/gomega/RELEASING.md @@ -0,0 +1,12 @@ +A Gomega release is a tagged sha and a GitHub release. To cut a release: + +1. Ensure CHANGELOG.md is up to date. + - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Categorize the changes into + - Breaking Changes (requires a major version) + - New Features (minor version) + - Fixes (fix version) + - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) +2. Update GOMEGA_VERSION in `gomega_dsl.go` +3. Push a commit with the version number as the commit message (e.g. `v1.3.0`) +4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes. diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go new file mode 100644 index 000000000..6559525f1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -0,0 +1,382 @@ +/* +Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information. +*/ +package format + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects +var MaxDepth = uint(10) + +/* +By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output. + +Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead. + +Note that GoString and String don't always have all the information you need to understand why a test failed! +*/ +var UseStringerRepresentation = false + +/* +Print the content of context objects. By default it will be suppressed. + +Set PrintContextObjects = true to enable printing of the context internals. 
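+
+For example (illustrative):
+
+	PrintContextObjects = true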
+*/
+var PrintContextObjects = false
+
+// TruncatedDiff chooses whether to display a truncated pretty diff or the full one
+var TruncatedDiff = true
+
+// Ctx interface defined here to keep backwards compatibility with go < 1.7
+// It matches the context.Context interface
+type Ctx interface {
+	Deadline() (deadline time.Time, ok bool)
+	Done() <-chan struct{}
+	Err() error
+	Value(key interface{}) interface{}
+}
+
+var contextType = reflect.TypeOf((*Ctx)(nil)).Elem()
+var timeType = reflect.TypeOf(time.Time{})
+
+// The default indentation string emitted by the format package
+var Indent = "    "
+
+var longFormThreshold = 20
+
+/*
+Generates a formatted matcher success/failure message of the form:
+
+	Expected
+		<pretty printed actual>
+	<message>
+		<pretty printed expected>
+
+If expected is omitted, then the message looks like:
+
+	Expected
+		<pretty printed actual>
+	<message>
+*/
+func Message(actual interface{}, message string, expected ...interface{}) string {
+	if len(expected) == 0 {
+		return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
+	}
+	return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
+}
+
+/*
+Generates a nicely formatted matcher success/failure message, much like Message(...),
+but it attempts to pretty print diffs in strings:
+
+	Expected
+	    <string>: "...aaaaabaaaaa..."
+	to equal               |
+	    <string>: "...aaaaazaaaaa..."
+*/
+func MessageWithDiff(actual, message, expected string) string {
+	if TruncatedDiff && len(actual) >= truncateThreshold && len(expected) >= truncateThreshold {
+		diffPoint := findFirstMismatch(actual, expected)
+		formattedActual := truncateAndFormat(actual, diffPoint)
+		formattedExpected := truncateAndFormat(expected, diffPoint)
+
+		spacesBeforeFormattedMismatch := findFirstMismatch(formattedActual, formattedExpected)
+
+		tabLength := 4
+		spaceFromMessageToActual := tabLength + len(": ") - len(message)
+		padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|"
+		return Message(formattedActual, message+padding, formattedExpected)
+	}
+	return Message(actual, message, expected)
+}
+
+func truncateAndFormat(str string, index int) string {
+	leftPadding := `...`
+	rightPadding := `...`
+
+	start := index - charactersAroundMismatchToInclude
+	if start < 0 {
+		start = 0
+		leftPadding = ""
+	}
+
+	// slice index must include the mis-matched character
+	lengthOfMismatchedCharacter := 1
+	end := index + charactersAroundMismatchToInclude + lengthOfMismatchedCharacter
+	if end > len(str) {
+		end = len(str)
+		rightPadding = ""
+	}
+	return fmt.Sprintf("\"%s\"", leftPadding+str[start:end]+rightPadding)
+}
+
+func findFirstMismatch(a, b string) int {
+	aSlice := strings.Split(a, "")
+	bSlice := strings.Split(b, "")
+
+	for index, str := range aSlice {
+		if index > len(bSlice)-1 {
+			return index
+		}
+		if str != bSlice[index] {
+			return index
+		}
+	}
+
+	if len(b) > len(a) {
+		return len(a) + 1
+	}
+
+	return 0
+}
+
+const (
+	truncateThreshold                 = 50
+	charactersAroundMismatchToInclude = 5
+)
+
+/*
+Pretty prints the passed-in object at the passed-in indentation level.
+
+Object recurses into deeply nested objects emitting pretty-printed representations of their components.
+
+Modify format.MaxDepth to control how deep the recursion is allowed to go.
+Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of
+recursing into the object.
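+
+For example (an illustrative call; the exact output depends on Indent):
+
+	Object([]int{1, 2}, 1)
+	// "    <[]int | len:2, cap:2>: [1, 2]"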
+
+Set PrintContextObjects to true to print the content of objects implementing context.Context.
+*/
+func Object(object interface{}, indentation uint) string {
+	indent := strings.Repeat(Indent, int(indentation))
+	value := reflect.ValueOf(object)
+	return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
+}
+
+/*
+IndentString takes a string and indents each line by the specified amount.
+*/
+func IndentString(s string, indentation uint) string {
+	components := strings.Split(s, "\n")
+	result := ""
+	indent := strings.Repeat(Indent, int(indentation))
+	for i, component := range components {
+		result += indent + component
+		if i < len(components)-1 {
+			result += "\n"
+		}
+	}
+
+	return result
+}
+
+func formatType(object interface{}) string {
+	t := reflect.TypeOf(object)
+	if t == nil {
+		return "nil"
+	}
+	switch t.Kind() {
+	case reflect.Chan:
+		v := reflect.ValueOf(object)
+		return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+	case reflect.Ptr:
+		return fmt.Sprintf("%T | %p", object, object)
+	case reflect.Slice:
+		v := reflect.ValueOf(object)
+		return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+	case reflect.Map:
+		v := reflect.ValueOf(object)
+		return fmt.Sprintf("%T | len:%d", object, v.Len())
+	default:
+		return fmt.Sprintf("%T", object)
+	}
+}
+
+func formatValue(value reflect.Value, indentation uint) string {
+	if indentation > MaxDepth {
+		return "..."
+	}
+
+	if isNilValue(value) {
+		return "nil"
+	}
+
+	if UseStringerRepresentation {
+		if value.CanInterface() {
+			obj := value.Interface()
+			switch x := obj.(type) {
+			case fmt.GoStringer:
+				return x.GoString()
+			case fmt.Stringer:
+				return x.String()
+			}
+		}
+	}
+
+	if !PrintContextObjects {
+		if value.Type().Implements(contextType) && indentation > 1 {
+			return "<suppressed context>"
+		}
+	}
+
+	switch value.Kind() {
+	case reflect.Bool:
+		return fmt.Sprintf("%v", value.Bool())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return fmt.Sprintf("%v", value.Int())
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return fmt.Sprintf("%v", value.Uint())
+	case reflect.Uintptr:
+		return fmt.Sprintf("0x%x", value.Uint())
+	case reflect.Float32, reflect.Float64:
+		return fmt.Sprintf("%v", value.Float())
+	case reflect.Complex64, reflect.Complex128:
+		return fmt.Sprintf("%v", value.Complex())
+	case reflect.Chan:
+		return fmt.Sprintf("0x%x", value.Pointer())
+	case reflect.Func:
+		return fmt.Sprintf("0x%x", value.Pointer())
+	case reflect.Ptr:
+		return formatValue(value.Elem(), indentation)
+	case reflect.Slice:
+		return formatSlice(value, indentation)
+	case reflect.String:
+		return formatString(value.String(), indentation)
+	case reflect.Array:
+		return formatSlice(value, indentation)
+	case reflect.Map:
+		return formatMap(value, indentation)
+	case reflect.Struct:
+		if value.Type() == timeType && value.CanInterface() {
+			t, _ := value.Interface().(time.Time)
+			return t.Format(time.RFC3339Nano)
+		}
+		return formatStruct(value, indentation)
+	case reflect.Interface:
+		return formatValue(value.Elem(), indentation)
+	default:
+		if value.CanInterface() {
+			return fmt.Sprintf("%#v", value.Interface())
+		}
+		return fmt.Sprintf("%#v", value)
+	}
+}
+
+func formatString(object interface{}, indentation uint) string {
+	if indentation == 1 {
+		s := fmt.Sprintf("%s", object)
+		components := strings.Split(s, "\n")
+		result := ""
+		for i, component := range components {
+			if i == 0 {
+				result += component
+			} else {
+				result
+= Indent + component + } + if i < len(components)-1 { + result += "\n" + } + } + + return fmt.Sprintf("%s", result) + } else { + return fmt.Sprintf("%q", object) + } +} + +func formatSlice(v reflect.Value, indentation uint) string { + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())) { + return formatString(v.Bytes(), indentation) + } + + l := v.Len() + result := make([]string, l) + longest := 0 + for i := 0; i < l; i++ { + result[i] = formatValue(v.Index(i), indentation+1) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return fmt.Sprintf("[%s]", strings.Join(result, ", ")) +} + +func formatMap(v reflect.Value, indentation uint) string { + l := v.Len() + result := make([]string, l) + + longest := 0 + for i, key := range v.MapKeys() { + value := v.MapIndex(key) + result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1), formatValue(value, indentation+1)) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) +} + +func formatStruct(v reflect.Value, indentation uint) string { + t := v.Type() + + l := v.NumField() + result := []string{} + longest := 0 + for i := 0; i < l; i++ { + structField := t.Field(i) + fieldEntry := v.Field(i) + representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1)) + result = append(result, representation) + if len(representation) > longest { + longest = len(representation) + } + } + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) +} + +func isNilValue(a reflect.Value) bool { + switch a.Kind() { + case reflect.Invalid: + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return a.IsNil() + } + + return false +} + +/* +Returns true when the string is entirely made of printable runes, false otherwise. 
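+
+For example (illustrative, following strconv.IsPrint):
+
+	isPrintableString("héllo")  // true
+	isPrintableString("a\x00b") // false: NUL is not a printable rune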
+*/ +func isPrintableString(str string) bool { + for _, runeValue := range str { + if !strconv.IsPrint(runeValue) { + return false + } + } + return true +} diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod new file mode 100644 index 000000000..65eedf696 --- /dev/null +++ b/vendor/github.com/onsi/gomega/go.mod @@ -0,0 +1,15 @@ +module github.com/onsi/gomega + +require ( + github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/golang/protobuf v1.2.0 + github.com/hpcloud/tail v1.0.0 // indirect + github.com/onsi/ginkgo v1.6.0 + golang.org/x/net v0.0.0-20180906233101-161cd47e91fd + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect + golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/fsnotify.v1 v1.4.7 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.2.1 +) diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum new file mode 100644 index 000000000..b23f6ef02 --- /dev/null +++ b/vendor/github.com/onsi/gomega/go.sum @@ -0,0 +1,24 @@ +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go new file mode 100644 index 000000000..448d595da --- /dev/null +++ 
b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -0,0 +1,429 @@
+/*
+Gomega is the Ginkgo BDD-style testing framework's preferred matcher library.
+
+The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/
+
+Gomega on Github: http://github.com/onsi/gomega
+
+Learn more about Ginkgo online: http://onsi.github.io/ginkgo
+
+Ginkgo on Github: http://github.com/onsi/ginkgo
+
+Gomega is MIT-Licensed
+*/
+package gomega
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/onsi/gomega/internal/assertion"
+	"github.com/onsi/gomega/internal/asyncassertion"
+	"github.com/onsi/gomega/internal/testingtsupport"
+	"github.com/onsi/gomega/types"
+)
+
+const GOMEGA_VERSION = "1.5.0"
+
+const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
+If you're using Ginkgo then you probably forgot to put your assertion in an It().
+Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
+Depending on your vendoring solution you may be inadvertently importing gomega and subpackages (e.g. ghttp, gexec,...) from different locations.
+`
+
+var globalFailWrapper *types.GomegaFailWrapper
+
+var defaultEventuallyTimeout = time.Second
+var defaultEventuallyPollingInterval = 10 * time.Millisecond
+var defaultConsistentlyDuration = 100 * time.Millisecond
+var defaultConsistentlyPollingInterval = 10 * time.Millisecond
+
+// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
+// the fail handler passed into RegisterFailHandler is called.
+func RegisterFailHandler(handler types.GomegaFailHandler) {
+	RegisterFailHandlerWithT(testingtsupport.EmptyTWithHelper{}, handler)
+}
+
+// RegisterFailHandlerWithT ensures that the given types.TWithHelper and fail handler
+// are used globally.
+func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandler) {
+	if handler == nil {
+		globalFailWrapper = nil
+		return
+	}
+
+	globalFailWrapper = &types.GomegaFailWrapper{
+		Fail:        handler,
+		TWithHelper: t,
+	}
+}
+
+// RegisterTestingT connects Gomega to Golang's XUnit style
+// Testing.T tests. It is now deprecated and you should use NewWithT() instead.
+//
+// Legacy Documentation:
+//
+// You'll need to call this at the top of each XUnit style test:
+//
+//	func TestFarmHasCow(t *testing.T) {
+//	    RegisterTestingT(t)
+//
+//	    f := farm.New([]string{"Cow", "Horse"})
+//	    Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
+//	}
+//
+// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to
+// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests
+// in parallel as the global fail handler cannot point to more than one testing.T at a time.
+//
+// NewWithT() does not have this limitation
+//
+// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
+func RegisterTestingT(t types.GomegaTestingT) {
+	tWithHelper, hasHelper := t.(types.TWithHelper)
+	if !hasHelper {
+		RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail)
+		return
+	}
+	RegisterFailHandlerWithT(tWithHelper, testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail)
+}
+
+// InterceptGomegaFailures runs a given callback and returns an array of
+// failure messages generated by any Gomega assertions within the callback.
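+// For example (an illustrative sketch):
+//
+//	failures := InterceptGomegaFailures(func() {
+//	    Expect("foo").To(Equal("bar"))
+//	})
+//	// failures now holds the single mismatch message.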
+//
+// This is accomplished by temporarily replacing the *global* fail handler
+// with a fail handler that simply annotates failures. The original fail handler
+// is reset when InterceptGomegaFailures returns.
+//
+// This is most useful when testing custom matchers, but can also be used to check
+// on a value using a Gomega assertion without causing a test failure.
+func InterceptGomegaFailures(f func()) []string {
+	originalHandler := globalFailWrapper.Fail
+	failures := []string{}
+	RegisterFailHandler(func(message string, callerSkip ...int) {
+		failures = append(failures, message)
+	})
+	f()
+	RegisterFailHandler(originalHandler)
+	return failures
+}
+
+// Ω wraps an actual value allowing assertions to be made on it:
+//	Ω("foo").Should(Equal("foo"))
+//
+// If Ω is passed more than one argument it will pass the *first* argument to the matcher.
+// All subsequent arguments will be required to be nil/zero.
+//
+// This is convenient if you want to make an assertion on a method/function that returns
+// a value and an error - a common pattern in Go.
+//
+// For example, given a function with signature:
+//	func MyAmazingThing() (int, error)
+//
+// Then:
+//	Ω(MyAmazingThing()).Should(Equal(3))
+// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+// Ω and Expect are identical
+func Ω(actual interface{}, extra ...interface{}) Assertion {
+	return ExpectWithOffset(0, actual, extra...)
+}
+
+// Expect wraps an actual value allowing assertions to be made on it:
+//	Expect("foo").To(Equal("foo"))
+//
+// If Expect is passed more than one argument it will pass the *first* argument to the matcher.
+// All subsequent arguments will be required to be nil/zero.
+//
+// This is convenient if you want to make an assertion on a method/function that returns
+// a value and an error - a common pattern in Go.
+//
+// For example, given a function with signature:
+//	func MyAmazingThing() (int, error)
+//
+// Then:
+//	Expect(MyAmazingThing()).Should(Equal(3))
+// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+// Expect and Ω are identical
+func Expect(actual interface{}, extra ...interface{}) Assertion {
+	return ExpectWithOffset(0, actual, extra...)
+}
+
+// ExpectWithOffset wraps an actual value allowing assertions to be made on it:
+//	ExpectWithOffset(1, "foo").To(Equal("foo"))
+//
+// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument;
+// this is used to modify the call-stack offset when computing line numbers.
+//
+// This is most useful in helper functions that make assertions. If you want Gomega's
+// error message to refer to the calling line in the test (as opposed to the line in the helper function)
+// set the first argument of `ExpectWithOffset` appropriately.
+func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion {
+	if globalFailWrapper == nil {
+		panic(nilFailHandlerPanic)
+	}
+	return assertion.New(actual, globalFailWrapper, offset, extra...)
+}
+
+// Eventually wraps an actual value allowing assertions to be made on it.
+// The assertion is tried periodically until it passes or a timeout occurs.
+//
+// Both the timeout and polling interval are configurable as optional arguments:
+// The first optional argument is the timeout
+// The second optional argument is the polling interval
+//
+// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
+// last case they are interpreted as seconds.
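+//
+// For example, each of these waits up to 5 seconds (an illustrative sketch;
+// fetch is a placeholder function):
+//
+//	Eventually(fetch, 5*time.Second).Should(Equal("ready"))
+//	Eventually(fetch, "5s").Should(Equal("ready"))
+//	Eventually(fetch, 5).Should(Equal("ready"))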
+//
+// If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
+// then Eventually will call the function periodically and try the matcher against the function's first return value.
+//
+// Example:
+//
+//	Eventually(func() int {
+//	    return thingImPolling.Count()
+//	}).Should(BeNumerically(">=", 17))
+//
+// Note that this example could be rewritten:
+//
+//	Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
+//
+// If the function returns more than one value, then Eventually will pass the first value to the matcher and
+// assert that all other values are nil/zero.
+// This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
+//
+// For example, consider a method that returns a value and an error:
+//	func FetchFromDB() (string, error)
+//
+// Then
+//	Eventually(FetchFromDB).Should(Equal("hasselhoff"))
+//
+// Will pass only if the returned error is nil and the returned string passes the matcher.
+//
+// Eventually's default timeout is 1 second, and its default polling interval is 10ms
+func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
+	return EventuallyWithOffset(0, actual, intervals...)
+}
+
+// EventuallyWithOffset operates like Eventually but takes an additional
+// initial argument to indicate an offset in the call stack. This is useful when building helper
+// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
+	if globalFailWrapper == nil {
+		panic(nilFailHandlerPanic)
+	}
+	timeoutInterval := defaultEventuallyTimeout
+	pollingInterval := defaultEventuallyPollingInterval
+	if len(intervals) > 0 {
+		timeoutInterval = toDuration(intervals[0])
+	}
+	if len(intervals) > 1 {
+		pollingInterval = toDuration(intervals[1])
+	}
+	return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset)
+}
+
+// Consistently wraps an actual value allowing assertions to be made on it.
+// The assertion is tried periodically and is required to pass for a period of time.
+//
+// Both the total time and polling interval are configurable as optional arguments:
+// The first optional argument is the duration that Consistently will run for
+// The second optional argument is the polling interval
+//
+// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
+// last case they are interpreted as seconds.
+//
+// If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
+// then Consistently will call the function periodically and try the matcher against the function's first return value.
+//
+// If the function returns more than one value, then Consistently will pass the first value to the matcher and
+// assert that all other values are nil/zero.
+// This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
+//
+// Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
+// For example, you want to assert that a goroutine does *not* send data down a channel.
In this case, you could:
+//
+//	Consistently(channel).ShouldNot(Receive())
+//
+// Consistently's default duration is 100ms, and its default polling interval is 10ms
+func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
+	return ConsistentlyWithOffset(0, actual, intervals...)
+}
+
+// ConsistentlyWithOffset operates like Consistently but takes an additional
+// initial argument to indicate an offset in the call stack. This is useful when building helper
+// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
+	if globalFailWrapper == nil {
+		panic(nilFailHandlerPanic)
+	}
+	timeoutInterval := defaultConsistentlyDuration
+	pollingInterval := defaultConsistentlyPollingInterval
+	if len(intervals) > 0 {
+		timeoutInterval = toDuration(intervals[0])
+	}
+	if len(intervals) > 1 {
+		pollingInterval = toDuration(intervals[1])
+	}
+	return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset)
+}
+
+// SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
+func SetDefaultEventuallyTimeout(t time.Duration) {
+	defaultEventuallyTimeout = t
+}
+
+// SetDefaultEventuallyPollingInterval sets the default polling interval for Eventually.
+func SetDefaultEventuallyPollingInterval(t time.Duration) {
+	defaultEventuallyPollingInterval = t
+}
+
+// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
+func SetDefaultConsistentlyDuration(t time.Duration) {
+	defaultConsistentlyDuration = t
+}
+
+// SetDefaultConsistentlyPollingInterval sets the default polling interval for Consistently.
+func SetDefaultConsistentlyPollingInterval(t time.Duration) {
+	defaultConsistentlyPollingInterval = t
+}
+
+// AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
+// the matcher passed to the Should and ShouldNot methods.
+//
+// Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to
+// fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more
+// descriptive.
+//
+// Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
+//
+// Example:
+//
+//	Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+//	Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
+type AsyncAssertion interface {
+	Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+}
+
+// GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter.
+type GomegaAsyncAssertion = AsyncAssertion
+
+// Assertion is returned by Ω and Expect and compares the actual value to the matcher
+// passed to the Should/ShouldNot and To/ToNot/NotTo methods.
+//
+// Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
+// though this is not enforced.
+//
+// All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
+// and is used to annotate failure messages.
+//
+// All methods return a bool that is true if the assertion passed and false if it failed.
+//
+// Example:
+//
+//	Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
+type Assertion interface {
+	Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+
+	To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+	NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+}
+
+// GomegaAssertion is deprecated in favor of Assertion, which does not stutter.
+type GomegaAssertion = Assertion
+
+// OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher, but sticks around to support existing code that uses it.
+type OmegaMatcher types.GomegaMatcher
+
+// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
+// Gomega's rich ecosystem of matchers in standard `testing` test suites.
+//
+// Use `NewWithT` to instantiate a `WithT`
+type WithT struct {
+	t types.GomegaTestingT
+}
+
+// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter.
+type GomegaWithT = WithT
+
+// NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
+// Gomega's rich ecosystem of matchers in standard `testing` test suites.
+//
+//	func TestFarmHasCow(t *testing.T) {
+//	    g := gomega.NewWithT(t)
+//
+//	    f := farm.New([]string{"Cow", "Horse"})
+//	    g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
+//	}
func NewWithT(t types.GomegaTestingT) *WithT {
+	return &WithT{
+		t: t,
+	}
+}
+
+// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter.
+func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT {
+	return NewWithT(t)
+}
+
+// Expect is used to make assertions. See documentation for Expect.
+func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion {
+	return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...)
+}
+
+// Eventually is used to make asynchronous assertions. See documentation for Eventually.
+func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
+	timeoutInterval := defaultEventuallyTimeout
+	pollingInterval := defaultEventuallyPollingInterval
+	if len(intervals) > 0 {
+		timeoutInterval = toDuration(intervals[0])
+	}
+	if len(intervals) > 1 {
+		pollingInterval = toDuration(intervals[1])
+	}
+	return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0)
+}
+
+// Consistently is used to make asynchronous assertions. See documentation for Consistently.
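+// For example (illustrative):
+//
+//	g := NewWithT(t)
+//	g.Consistently(myChannel).ShouldNot(Receive())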
+func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { + timeoutInterval := defaultConsistentlyDuration + pollingInterval := defaultConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) +} + +func toDuration(input interface{}) time.Duration { + duration, ok := input.(time.Duration) + if ok { + return duration + } + + value := reflect.ValueOf(input) + kind := reflect.TypeOf(input).Kind() + + if reflect.Int <= kind && kind <= reflect.Int64 { + return time.Duration(value.Int()) * time.Second + } else if reflect.Uint <= kind && kind <= reflect.Uint64 { + return time.Duration(value.Uint()) * time.Second + } else if reflect.Float32 <= kind && kind <= reflect.Float64 { + return time.Duration(value.Float() * float64(time.Second)) + } else if reflect.String == kind { + duration, err := time.ParseDuration(value.String()) + if err != nil { + panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + } + return duration + } + + panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go new file mode 100644 index 000000000..00197b67a --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go @@ -0,0 +1,105 @@ +package assertion + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/types" +) + +type Assertion struct { + actualInput interface{} + failWrapper *types.GomegaFailWrapper + offset int + extra []interface{} +} + +func New(actualInput interface{}, failWrapper *types.GomegaFailWrapper, offset int, extra ...interface{}) *Assertion { + return &Assertion{ + actualInput: actualInput, + failWrapper: failWrapper, + offset: offset, + extra: extra, + } +} + +func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) 
+} + +func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + matches, err := matcher.Match(assertion.actualInput) + description := assertion.buildDescription(optionalDescription...) + assertion.failWrapper.TWithHelper.Helper() + if err != nil { + assertion.failWrapper.Fail(description+err.Error(), 2+assertion.offset) + return false + } + if matches != desiredMatch { + var message string + if desiredMatch { + message = matcher.FailureMessage(assertion.actualInput) + } else { + message = matcher.NegatedFailureMessage(assertion.actualInput) + } + assertion.failWrapper.Fail(description+message, 2+assertion.offset) + return false + } + + return true +} + +func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool { + success, message := vetExtras(assertion.extra) + if success { + return true + } + + description := assertion.buildDescription(optionalDescription...) + assertion.failWrapper.TWithHelper.Helper() + assertion.failWrapper.Fail(description+message, 2+assertion.offset) + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go new file mode 100644 index 000000000..cdab233eb --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go @@ -0,0 +1,194 @@ +package asyncassertion + +import ( + "errors" + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AsyncAssertionType uint + +const ( + AsyncAssertionTypeEventually AsyncAssertionType = iota + AsyncAssertionTypeConsistently +) + +type AsyncAssertion struct { + asyncType AsyncAssertionType + actualInput interface{} + timeoutInterval time.Duration + pollingInterval time.Duration + failWrapper *types.GomegaFailWrapper + offset int +} + +func New(asyncType AsyncAssertionType, actualInput interface{}, failWrapper *types.GomegaFailWrapper, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { + actualType := reflect.TypeOf(actualInput) + if actualType.Kind() == reflect.Func { + if actualType.NumIn() != 0 || actualType.NumOut() == 0 { + panic("Expected a function with no arguments and one or more return values.") + } + } + + return &AsyncAssertion{ + asyncType: asyncType, + actualInput: actualInput, + failWrapper: failWrapper, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + offset: offset, + } +} + +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.match(matcher, true, optionalDescription...) 
+} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *AsyncAssertion) actualInputIsAFunction() bool { + actualType := reflect.TypeOf(assertion.actualInput) + return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0 +} + +func (assertion *AsyncAssertion) pollActual() (interface{}, error) { + if assertion.actualInputIsAFunction() { + values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{}) + + extras := []interface{}{} + for _, value := range values[1:] { + extras = append(extras, value.Interface()) + } + + success, message := vetExtras(extras) + + if !success { + return nil, errors.New(message) + } + + return values[0].Interface(), nil + } + + return assertion.actualInput, nil +} + +func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualInputIsAFunction() { + return true + } + + return oraclematcher.MatchMayChangeInTheFuture(matcher, value) +} + +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + timer := time.Now() + timeout := time.After(assertion.timeoutInterval) + + description := assertion.buildDescription(optionalDescription...) + + var matches bool + var err error + mayChange := true + value, err := assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + + assertion.failWrapper.TWithHelper.Helper() + + fail := func(preamble string) { + errMsg := "" + message := "" + if err != nil { + errMsg = "Error: " + err.Error() + } else { + if desiredMatch { + message = matcher.FailureMessage(value) + } else { + message = matcher.NegatedFailureMessage(value) + } + } + assertion.failWrapper.TWithHelper.Helper() + assertion.failWrapper.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + } + + if assertion.asyncType == AsyncAssertionTypeEventually { + for { + if err == nil && matches == desiredMatch { + return true + } + + if !mayChange { + fail("No future change is possible. 
Bailing out early") + return false + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + fail("Timed out") + return false + } + } + } else if assertion.asyncType == AsyncAssertionTypeConsistently { + for { + if !(err == nil && matches == desiredMatch) { + fail("Failed") + return false + } + + if !mayChange { + return true + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + return true + } + } + } + + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go new file mode 100644 index 000000000..66cad88a1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go @@ -0,0 +1,25 @@ +package oraclematcher + +import "github.com/onsi/gomega/types" + +/* +GomegaMatchers that also match the OracleMatcher interface can convey information about +whether or not their result will change upon future attempts. + +This allows `Eventually` and `Consistently` to short circuit if success becomes impossible. + +For example, a process' exit code can never change. So, gexec's Exit matcher returns `true` +for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. 
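+
+For illustration only (a hedged sketch, not gexec's actual code), such a matcher
+might satisfy the interface like this:
+
+	func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+		return m.exitCode == -1 // the process is still running, so the result may still change
+	}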
+*/ +type OracleMatcher interface { + MatchMayChangeInTheFuture(actual interface{}) bool +} + +func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool { + oracleMatcher, ok := matcher.(OracleMatcher) + if !ok { + return true + } + + return oracleMatcher.MatchMayChangeInTheFuture(value) +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go new file mode 100644 index 000000000..bb27032f6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go @@ -0,0 +1,60 @@ +package testingtsupport + +import ( + "regexp" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/types" +) + +var StackTracePruneRE = regexp.MustCompile(`\/gomega\/|\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + +type EmptyTWithHelper struct{} + +func (e EmptyTWithHelper) Helper() {} + +type gomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +func BuildTestingTGomegaFailWrapper(t gomegaTestingT) *types.GomegaFailWrapper { + tWithHelper, hasHelper := t.(types.TWithHelper) + if !hasHelper { + tWithHelper = EmptyTWithHelper{} + } + + fail := func(message string, callerSkip ...int) { + if hasHelper { + tWithHelper.Helper() + t.Fatalf("\n%s", message) + } else { + skip := 2 + if len(callerSkip) > 0 { + skip += callerSkip[0] + } + stackTrace := pruneStack(string(debug.Stack()), skip) + t.Fatalf("\n%s\n%s\n", stackTrace, message) + } + } + + return &types.GomegaFailWrapper{ + Fail: fail, + TWithHelper: tWithHelper, + } +} + +func pruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n")[1:] + if len(stack) > 2*skip { + stack = stack[2*skip:] + } + prunedStack := []string{} + for i := 0; i < len(stack)/2; i++ { + if !StackTracePruneRE.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go new file mode 100644 index 000000000..c3a326dd4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -0,0 +1,427 @@ +package gomega + +import ( + "time" + + "github.com/onsi/gomega/matchers" + "github.com/onsi/gomega/types" +) + +//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about +//types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. +func Equal(expected interface{}) types.GomegaMatcher { + return &matchers.EqualMatcher{ + Expected: expected, + } +} + +//BeEquivalentTo is more lax than Equal, allowing equality between different types. +//This is done by converting actual to have the type of expected before +//attempting equality with reflect.DeepEqual. +//It is an error for actual and expected to be nil. Use BeNil() instead. +func BeEquivalentTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeEquivalentToMatcher{ + Expected: expected, + } +} + +//BeIdenticalTo uses the == operator to compare actual with expected. +//BeIdenticalTo is strict about types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. 
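+//For example (an illustrative sketch):
+// Expect(io.EOF).Should(BeIdenticalTo(io.EOF))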
+func BeIdenticalTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeIdenticalToMatcher{ + Expected: expected, + } +} + +//BeNil succeeds if actual is nil +func BeNil() types.GomegaMatcher { + return &matchers.BeNilMatcher{} +} + +//BeTrue succeeds if actual is true +func BeTrue() types.GomegaMatcher { + return &matchers.BeTrueMatcher{} +} + +//BeFalse succeeds if actual is false +func BeFalse() types.GomegaMatcher { + return &matchers.BeFalseMatcher{} +} + +//HaveOccurred succeeds if actual is a non-nil error +//The typical Go error checking pattern looks like: +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) +func HaveOccurred() types.GomegaMatcher { + return &matchers.HaveOccurredMatcher{} +} + +//Succeed passes if actual is a nil error +//Succeed is intended to be used with functions that return a single error value. Instead of +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) +// +//You can write: +// Expect(SomethingThatMightFail()).Should(Succeed()) +// +//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect +//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. +//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. +func Succeed() types.GomegaMatcher { + return &matchers.SucceedMatcher{} +} + +//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// +//These are valid use-cases: +// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" +// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// +//It is an error for err to be nil or an object that does not implement the Error interface +func MatchError(expected interface{}) types.GomegaMatcher { + return &matchers.MatchErrorMatcher{ + Expected: expected, + } +} + +//BeClosed succeeds if actual is a closed channel. +//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil +// +//In order to check whether or not the channel is closed, Gomega must try to read from the channel +//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about +//values coming down the channel. +// +//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before +//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). +// +//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. +func BeClosed() types.GomegaMatcher { + return &matchers.BeClosedMatcher{} +} + +//Receive succeeds if there is a value to be received on actual. +//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. +// +//Receive returns immediately and never blocks: +// +//- If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +//- If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +//- If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. 
+//
+//If you have a go-routine running in the background that will write to channel `c` you can:
+// Eventually(c).Should(Receive())
+//
+//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
+//
+//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
+// Consistently(c).ShouldNot(Receive())
+//
+//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
+// Expect(c).Should(Receive(Equal("foo")))
+//
+//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
+//
+//Passing Receive a matcher is especially useful when paired with Eventually:
+//
+// Eventually(c).Should(Receive(ContainSubstring("bar")))
+//
+//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+//
+//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+// var myThing thing
+// Eventually(thingChan).Should(Receive(&myThing))
+// Expect(myThing.Sprocket).Should(Equal("foo"))
+// Expect(myThing.IsValid()).Should(BeTrue())
+func Receive(args ...interface{}) types.GomegaMatcher {
+ var arg interface{}
+ if len(args) > 0 {
+ arg = args[0]
+ }
+
+ return &matchers.ReceiveMatcher{
+ Arg: arg,
+ }
+}
+
+//BeSent succeeds if a value can be sent to actual.
+//Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
+//In addition, actual must not be closed.
+//
+//BeSent never blocks:
+//
+//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
+//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
+//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+//
+//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
+func BeSent(arg interface{}) types.GomegaMatcher {
+ return &matchers.BeSentMatcher{
+ Arg: arg,
+ }
+}
+
+//MatchRegexp succeeds if actual is a string or stringer that matches the
+//passed-in regexp. Optional arguments can be provided to construct a regexp
+//via fmt.Sprintf().
+func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
+ return &matchers.MatchRegexpMatcher{
+ Regexp: regexp,
+ Args: args,
+ }
+}
+
+//ContainSubstring succeeds if actual is a string or stringer that contains the
+//passed-in substring. Optional arguments can be provided to construct the substring
+//via fmt.Sprintf().
+func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
+ return &matchers.ContainSubstringMatcher{
+ Substr: substr,
+ Args: args,
+ }
+}
+
+//HavePrefix succeeds if actual is a string or stringer that contains the
+//passed-in string as a prefix. Optional arguments can be provided to construct
+//the prefix via fmt.Sprintf().
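+//For example (an illustrative sketch):
+// Expect("gomega-1.7").Should(HavePrefix("gomega-%d", 1))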
+func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
+ return &matchers.HavePrefixMatcher{
+ Prefix: prefix,
+ Args: args,
+ }
+}
+
+//HaveSuffix succeeds if actual is a string or stringer that contains the
+//passed-in string as a suffix. Optional arguments can be provided to construct
+//the suffix via fmt.Sprintf().
+func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
+ return &matchers.HaveSuffixMatcher{
+ Suffix: suffix,
+ Args: args,
+ }
+}
+
+//MatchJSON succeeds if actual is a string or stringer of JSON that matches
+//the expected JSON. The JSONs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+func MatchJSON(json interface{}) types.GomegaMatcher {
+ return &matchers.MatchJSONMatcher{
+ JSONToMatch: json,
+ }
+}
+
+//MatchXML succeeds if actual is a string or stringer of XML that matches
+//the expected XML. The XMLs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like whitespace shouldn't matter.
+func MatchXML(xml interface{}) types.GomegaMatcher {
+ return &matchers.MatchXMLMatcher{
+ XMLToMatch: xml,
+ }
+}
+
+//MatchYAML succeeds if actual is a string or stringer of YAML that matches
+//the expected YAML. The YAMLs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+func MatchYAML(yaml interface{}) types.GomegaMatcher {
+ return &matchers.MatchYAMLMatcher{
+ YAMLToMatch: yaml,
+ }
+}
+
+//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
+func BeEmpty() types.GomegaMatcher {
+ return &matchers.BeEmptyMatcher{}
+}
+
+//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
+func HaveLen(count int) types.GomegaMatcher {
+ return &matchers.HaveLenMatcher{
+ Count: count,
+ }
+}
+
+//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
+func HaveCap(count int) types.GomegaMatcher {
+ return &matchers.HaveCapMatcher{
+ Count: count,
+ }
+}
+
+//BeZero succeeds if actual is the zero value for its type or if actual is nil.
+func BeZero() types.GomegaMatcher {
+ return &matchers.BeZeroMatcher{}
+}
+
+//ContainElement succeeds if actual contains the passed in element.
+//By default ContainElement() uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+//
+//Actual must be an array, slice or map.
+//For maps, ContainElement searches through the map's values.
+func ContainElement(element interface{}) types.GomegaMatcher {
+ return &matchers.ContainElementMatcher{
+ Element: element,
+ }
+}
+
+//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
+//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
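+//For example (an illustrative sketch):
+// Expect(map[string]int{"a": 1, "b": 2}).Should(ConsistOf(1, 2))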
+//
+//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can, provided that it
+//is the only element passed in to ConsistOf:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+//
+//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+func ConsistOf(elements ...interface{}) types.GomegaMatcher {
+ return &matchers.ConsistOfMatcher{
+ Elements: elements,
+ }
+}
+
+//HaveKey succeeds if actual is a map with the passed in key.
+//By default HaveKey uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
+func HaveKey(key interface{}) types.GomegaMatcher {
+ return &matchers.HaveKeyMatcher{
+ Key: key,
+ }
+}
+
+//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
+//By default HaveKeyWithValue uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
+func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
+ return &matchers.HaveKeyWithValueMatcher{
+ Key: key,
+ Value: value,
+ }
+}
+
+//BeNumerically performs numerical assertions in a type-agnostic way.
+//Actual and expected should be numbers, though the specific type of
+//number is irrelevant (float32, float64, uint8, etc...).
+//
+//There are six, self-explanatory, supported comparators:
+// Expect(1.0).Should(BeNumerically("==", 1))
+// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01))
+// Expect(1.0).Should(BeNumerically(">", 0.9))
+// Expect(1.0).Should(BeNumerically(">=", 1.0))
+// Expect(1.0).Should(BeNumerically("<", 3))
+// Expect(1.0).Should(BeNumerically("<=", 1.0))
+func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
+ return &matchers.BeNumericallyMatcher{
+ Comparator: comparator,
+ CompareTo: compareTo,
+ }
+}
+
+//BeTemporally compares time.Time's like BeNumerically
+//Actual and expected must be time.Time. The comparators are the same as for BeNumerically
+// Expect(time.Now()).Should(BeTemporally(">", time.Time{}))
+// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
+func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
+ return &matchers.BeTemporallyMatcher{
+ Comparator: comparator,
+ CompareTo: compareTo,
+ Threshold: threshold,
+ }
+}
+
+//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
+//It will return an error when one of the values is nil.
+// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values
+// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
+// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
+// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
+func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
+ return &matchers.AssignableToTypeOfMatcher{
+ Expected: expected,
+ }
+}
+
+//Panic succeeds if actual is a function that, when invoked, panics.
+//Actual must be a function that takes no arguments and returns no results.
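+//For example (an illustrative sketch):
+// Expect(func() { panic("boom") }).Should(Panic())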
+func Panic() types.GomegaMatcher {
+ return &matchers.PanicMatcher{}
+}
+
+//BeAnExistingFile succeeds if a file exists.
+//Actual must be a string representing the abs path to the file being checked.
+func BeAnExistingFile() types.GomegaMatcher {
+ return &matchers.BeAnExistingFileMatcher{}
+}
+
+//BeARegularFile succeeds if a file exists and is a regular file.
+//Actual must be a string representing the abs path to the file being checked.
+func BeARegularFile() types.GomegaMatcher {
+ return &matchers.BeARegularFileMatcher{}
+}
+
+//BeADirectory succeeds if a file exists and is a directory.
+//Actual must be a string representing the abs path to the file being checked.
+func BeADirectory() types.GomegaMatcher {
+ return &matchers.BeADirectoryMatcher{}
+}
+
+//And succeeds only if all of the given matchers succeed.
+//The matchers are tried in order, and will fail-fast if one doesn't succeed.
+// Expect("hi").To(And(HaveLen(2), Equal("hi")))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.AndMatcher{Matchers: ms}
+}
+
+//SatisfyAll is an alias for And().
+// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
+func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
+ return And(matchers...)
+}
+
+//Or succeeds if any of the given matchers succeed.
+//The matchers are tried in order and will return immediately upon the first successful match.
+// Expect("hi").To(Or(HaveLen(3), HaveLen(2)))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.OrMatcher{Matchers: ms}
+}
+
+//SatisfyAny is an alias for Or().
+// Expect("hi").Should(SatisfyAny(HaveLen(3), HaveLen(2)))
+func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
+ return Or(matchers...)
+}
+
+//Not negates the given matcher; it succeeds if the given matcher fails.
+// Expect(1).To(Not(Equal(2)))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.NotMatcher{Matcher: matcher}
+}
+
+//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
+//The given transform must be a function of one parameter that returns one value.
+// var plus1 = func(i int) int { return i + 1 }
+// Expect(1).To(WithTransform(plus1, Equal(2)))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
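+//
+//A further illustrative sketch, nesting the combinators (plus1 as defined above):
+// Expect(1).To(Or(Equal(2), WithTransform(plus1, Equal(2))))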
+func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
+ return matchers.NewWithTransformMatcher(transform, matcher)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go
new file mode 100644
index 000000000..d83a29164
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/and.go
@@ -0,0 +1,63 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/internal/oraclematcher"
+ "github.com/onsi/gomega/types"
+)
+
+type AndMatcher struct {
+ Matchers []types.GomegaMatcher
+
+ // state
+ firstFailedMatcher types.GomegaMatcher
+}
+
+func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
+ m.firstFailedMatcher = nil
+ for _, matcher := range m.Matchers {
+ success, err := matcher.Match(actual)
+ if !success || err != nil {
+ m.firstFailedMatcher = matcher
+ return false, err
+ }
+ }
+ return true, nil
+}
+
+func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
+ return m.firstFailedMatcher.FailureMessage(actual)
+}
+
+func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ // not the most beautiful list of matchers, but not bad either...
+ return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
+}
+
+func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+ /*
+ Example with 3 matchers: A, B, C
+
+ Match evaluates them: T, F, => F
+ So match is currently F, what should MatchMayChangeInTheFuture() return?
+ Seems like it only depends on B, since currently B MUST change to allow the result to become T
+
+ Match eval: T, T, T => T
+ So match is currently T, what should MatchMayChangeInTheFuture() return?
+ Seems to depend on ANY of them being able to change to F.
+ */
+
+ if m.firstFailedMatcher == nil {
+ // so all matchers succeeded.. Any one of them changing would change the result.
+ for _, matcher := range m.Matchers {
+ if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
+ return true
+ }
+ }
+ return false // none of them were going to change
+ }
+ // one of the matchers failed.. it must be able to change in order to affect the result
+ return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
new file mode 100644
index 000000000..51f8be6ae
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
@@ -0,0 +1,35 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type AssignableToTypeOfMatcher struct {
+ Expected interface{}
+}
+
+func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
+ if actual == nil && matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+ } else if matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare type to <nil>.\nBe explicit and use BeNil() instead. 
This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } else if actual == nil { + return false, nil + } + + actualType := reflect.TypeOf(actual) + expectedType := reflect.TypeOf(matcher.Expected) + + return actualType.AssignableTo(expectedType), nil +} + +func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected)) +} + +func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected)) +} diff --git a/vendor/github.com/onsi/gomega/matchers/attributes_slice.go b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go new file mode 100644 index 000000000..355b362f4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go @@ -0,0 +1,14 @@ +package matchers + +import ( + "encoding/xml" + "strings" +) + +type attributesSlice []xml.Attr + +func (attrs attributesSlice) Len() int { return len(attrs) } +func (attrs attributesSlice) Less(i, j int) bool { + return strings.Compare(attrs[i].Name.Local, attrs[j].Name.Local) == -1 +} +func (attrs attributesSlice) Swap(i, j int) { attrs[i], attrs[j] = attrs[j], attrs[i] } diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go new file mode 100644 index 000000000..7b6975e41 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notADirectoryError struct { + os.FileInfo +} + +func (t notADirectoryError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.Mode().IsRegular(): + return "file is a regular file" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeADirectoryMatcher struct { + expected interface{} + err error +} + +func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsDir() { + matcher.err = notADirectoryError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err)) +} + +func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a directory")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go new file mode 100644 index 000000000..e239131fb --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notARegularFileError struct { + os.FileInfo +} + +func (t notARegularFileError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.IsDir(): + return "file is a directory" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeARegularFileMatcher 
struct { + expected interface{} + err error +} + +func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsRegular() { + matcher.err = notARegularFileError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err)) +} + +func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a regular file")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go new file mode 100644 index 000000000..d42eba223 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -0,0 +1,38 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type BeAnExistingFileMatcher struct { + expected interface{} +} + +func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path") + } + + if _, err = os.Stat(actualFilename); err != nil { + switch { + case os.IsNotExist(err): + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to exist")) +} + +func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to exist")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go new file mode 100644 index 000000000..80c9c8bb1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -0,0 +1,46 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeClosedMatcher struct { +} + +func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. 
Got:\n%s", format.Object(actual, 1)) + } + + winnerIndex, _, open := reflect.Select([]reflect.SelectCase{ + {Dir: reflect.SelectRecv, Chan: channelValue}, + {Dir: reflect.SelectDefault}, + }) + + var closed bool + if winnerIndex == 0 { + closed = !open + } else if winnerIndex == 1 { + closed = false + } + + return closed, nil +} + +func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be closed") +} + +func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be open") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go new file mode 100644 index 000000000..8b00311b0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type BeEmptyMatcher struct { +} + +func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == 0, nil +} + +func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be empty") +} + +func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be empty") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go new file mode 100644 index 000000000..97ab20a4e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go @@ -0,0 +1,34 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeEquivalentToMatcher struct { + Expected interface{} +} + +func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Both actual and expected must not be nil.") + } + + convertedActual := actual + + if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) { + convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface() + } + + return reflect.DeepEqual(convertedActual, matcher.Expected), nil +} + +func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be equivalent to", matcher.Expected) +} + +func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be equivalent to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go new file mode 100644 index 000000000..91d3b779e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -0,0 +1,26 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type BeFalseMatcher struct { +} + +func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. 
Got:\n%s", format.Object(actual, 1))
+ }
+
+ return actual == false, nil
+}
+
+func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be false")
+}
+
+func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be false")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
new file mode 100644
index 000000000..fdcda4d1f
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
@@ -0,0 +1,37 @@
+package matchers
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeIdenticalToMatcher struct {
+ Expected interface{}
+}
+
+func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+ if actual == nil && matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ success = false
+ matchErr = nil
+ }
+ }
+ }()
+
+ return actual == matcher.Expected, nil
+}
+
+func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string {
+ return format.Message(actual, "to be identical to", matcher.Expected)
+}
+
+func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string {
+ return format.Message(actual, "not to be identical to", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
new file mode 100644
index 000000000..7ee84fe1b
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
@@ -0,0 +1,18 @@
+package matchers
+
+import "github.com/onsi/gomega/format"
+
+type BeNilMatcher struct {
+}
+
+func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) {
+ return isNil(actual), nil
+}
+
+func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be nil")
+}
+
+func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be nil")
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
new file mode 100644
index 000000000..9f4f77eec
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
@@ -0,0 +1,132 @@
+package matchers
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeNumericallyMatcher struct {
+ Comparator string
+ CompareTo []interface{}
+}
+
+func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
+ return matcher.FormatFailureMessage(actual, false)
+}
+
+func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return matcher.FormatFailureMessage(actual, true)
+}
+
+func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, negated bool) (message string) {
+ if len(matcher.CompareTo) == 1 {
+ message = fmt.Sprintf("to be %s", matcher.Comparator)
+ } else {
+ message = fmt.Sprintf("to be within %v of %s", matcher.CompareTo[1], matcher.Comparator)
+ }
+ if negated {
+ message = "not " + message
+ }
+ return 
format.Message(actual, message, matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) { + if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 { + return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1)) + } + if !isNumber(actual) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1)) + } + if !isNumber(matcher.CompareTo[0]) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + if isFloat(actual) || isFloat(matcher.CompareTo[0]) { + var secondOperand float64 = 1e-8 + if len(matcher.CompareTo) == 2 { + secondOperand = toFloat(matcher.CompareTo[1]) + } + success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand) + } else if isInteger(actual) { + var secondOperand int64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toInteger(matcher.CompareTo[1]) + } + success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand) + } else if isUnsignedInteger(actual) { + var secondOperand uint64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toUnsignedInteger(matcher.CompareTo[1]) + } + success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand) + } else { + return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1)) + } + + return success, nil +} + +func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) { + switch matcher.Comparator { + case "==", "~": + diff := actual - compareTo + return -threshold <= diff && diff <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) { + switch matcher.Comparator { + case "==", "~": + if actual < compareTo { + actual, compareTo = compareTo, actual + } + return actual-compareTo <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) { + switch matcher.Comparator { + case "~": + return math.Abs(actual-compareTo) <= threshold + case "==": + return (actual == compareTo) + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go new file mode 100644 index 000000000..302dd1a0a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -0,0 +1,71 @@ 
+package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeSentMatcher struct { + Arg interface{} + channelClosed bool +} + +func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.RecvDir { + return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. Got:\n%s", format.Object(actual, 1)) + } + + argType := reflect.TypeOf(matcher.Arg) + assignable := argType.AssignableTo(channelType.Elem()) + + if !assignable { + return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1)) + } + + argValue := reflect.ValueOf(matcher.Arg) + + defer func() { + if e := recover(); e != nil { + success = false + err = fmt.Errorf("Cannot send to a closed channel") + matcher.channelClosed = true + } + }() + + winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{ + {Dir: reflect.SelectSend, Chan: channelValue, Send: argValue}, + {Dir: reflect.SelectDefault}, + }) + + var didSend bool + if winnerIndex == 0 { + didSend = true + } + + return didSend, nil +} + +func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go new file mode 100644 index 000000000..cb7c038ef --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go @@ -0,0 +1,66 @@ +package matchers + +import ( + "fmt" + "time" + + "github.com/onsi/gomega/format" +) + +type BeTemporallyMatcher struct { + Comparator string + CompareTo time.Time + Threshold []time.Duration +} + +func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) { + // predicate to test for time.Time type + isTime := func(t interface{}) bool { + _, ok := t.(time.Time) + return ok + } + + if !isTime(actual) { + return false, fmt.Errorf("Expected a time.Time. 
Got:\n%s", format.Object(actual, 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + var threshold = time.Millisecond + if len(matcher.Threshold) == 1 { + threshold = matcher.Threshold[0] + } + + return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil +} + +func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) { + switch matcher.Comparator { + case "==": + return actual.Equal(compareTo) + case "~": + diff := actual.Sub(compareTo) + return -threshold <= diff && diff <= threshold + case ">": + return actual.After(compareTo) + case ">=": + return !actual.Before(compareTo) + case "<": + return actual.Before(compareTo) + case "<=": + return !actual.After(compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go new file mode 100644 index 000000000..ec57c5db4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -0,0 +1,26 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type BeTrueMatcher struct { +} + +func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) + } + + return actual.(bool), nil +} + +func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be true") +} + +func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be true") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go new file mode 100644 index 000000000..26196f168 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeZeroMatcher struct { +} + +func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return true, nil + } + zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() + + return reflect.DeepEqual(zeroValue, actual), nil + +} + +func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be zero-valued") +} + +func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be zero-valued") +} diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go new file mode 100644 index 000000000..7b0e08868 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -0,0 +1,80 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" +) + +type ConsistOfMatcher struct { + Elements []interface{} +} + +func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. 
Got:\n%s", format.Object(actual, 1)) + } + + elements := matcher.Elements + if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) { + elements = []interface{}{} + value := reflect.ValueOf(matcher.Elements[0]) + for i := 0; i < value.Len(); i++ { + elements = append(elements, value.Index(i).Interface()) + } + } + + matchers := []interface{}{} + for _, element := range elements { + matcher, isMatcher := element.(omegaMatcher) + if !isMatcher { + matcher = &EqualMatcher{Expected: element} + } + matchers = append(matchers, matcher) + } + + values := matcher.valuesOf(actual) + + if len(values) != len(matchers) { + return false, nil + } + + neighbours := func(v, m interface{}) (bool, error) { + match, err := m.(omegaMatcher).Match(v) + return match && err == nil, nil + } + + bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours) + if err != nil { + return false, err + } + + return len(bipartiteGraph.LargestMatching()) == len(values), nil +} + +func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} { + value := reflect.ValueOf(actual) + values := []interface{}{} + if isMap(actual) { + keys := value.MapKeys() + for i := 0; i < value.Len(); i++ { + values = append(values, value.MapIndex(keys[i]).Interface()) + } + } else { + for i := 0; i < value.Len(); i++ { + values = append(values, value.Index(i).Interface()) + } + } + + return values +} + +func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to consist of", matcher.Elements) +} + +func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to consist of", matcher.Elements) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go new file mode 100644 index 000000000..4159335d0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -0,0 +1,56 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ContainElementMatcher struct { + Element interface{} +} + +func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. 
Got:\n%s", format.Object(actual, 1)) + } + + elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) + if !elementIsMatcher { + elemMatcher = &EqualMatcher{Expected: matcher.Element} + } + + value := reflect.ValueOf(actual) + var keys []reflect.Value + if isMap(actual) { + keys = value.MapKeys() + } + var lastError error + for i := 0; i < value.Len(); i++ { + var success bool + var err error + if isMap(actual) { + success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface()) + } else { + success, err = elemMatcher.Match(value.Index(i).Interface()) + } + if err != nil { + lastError = err + continue + } + if success { + return true, nil + } + } + + return false, lastError +} + +func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain element matching", matcher.Element) +} + +func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain element matching", matcher.Element) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go new file mode 100644 index 000000000..f8dc41e74 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go @@ -0,0 +1,38 @@ +package matchers + +import ( + "fmt" + "strings" + + "github.com/onsi/gomega/format" +) + +type ContainSubstringMatcher struct { + Substr string + Args []interface{} +} + +func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + + return strings.Contains(actualString, matcher.stringToMatch()), nil +} + +func (matcher *ContainSubstringMatcher) stringToMatch() string { + stringToMatch := matcher.Substr + if len(matcher.Args) > 0 { + stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...) + } + return stringToMatch +} + +func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain substring", matcher.stringToMatch()) +} + +func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain substring", matcher.stringToMatch()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go new file mode 100644 index 000000000..befb7bdfd --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go @@ -0,0 +1,42 @@ +package matchers + +import ( + "bytes" + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type EqualMatcher struct { + Expected interface{} +} + +func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + // Shortcut for byte slices. + // Comparing long byte slices with reflect.DeepEqual is very slow, + // so use bytes.Equal if actual and expected are both byte slices. 
+ if actualByteSlice, ok := actual.([]byte); ok { + if expectedByteSlice, ok := matcher.Expected.([]byte); ok { + return bytes.Equal(actualByteSlice, expectedByteSlice), nil + } + } + return reflect.DeepEqual(actual, matcher.Expected), nil +} + +func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { + actualString, actualOK := actual.(string) + expectedString, expectedOK := matcher.Expected.(string) + if actualOK && expectedOK { + return format.MessageWithDiff(actualString, "to equal", expectedString) + } + + return format.Message(actual, "to equal", matcher.Expected) +} + +func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to equal", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go new file mode 100644 index 000000000..7ace93dc3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveCapMatcher struct { + Count int +} + +func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := capOf(actual) + if !ok { + return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go new file mode 100644 index 000000000..ea5b92336 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type HaveKeyMatcher struct { + Key interface{} +} + +func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKey matcher expects a map. 
Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + return true, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "to have key matching", matcher.Key) + default: + return format.Message(actual, "to have key", matcher.Key) + } +} + +func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "not to have key matching", matcher.Key) + default: + return format.Message(actual, "not to have key", matcher.Key) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go new file mode 100644 index 000000000..06355b1e9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -0,0 +1,74 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type HaveKeyWithValueMatcher struct { + Key interface{} + Value interface{} +} + +func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher) + if !valueIsMatcher { + valueMatcher = &EqualMatcher{Expected: matcher.Value} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + actualValue := reflect.ValueOf(actual).MapIndex(keys[i]) + success, err := valueMatcher.Match(actualValue.Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) + } + return success, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) { + str := "to have {key: value}" + if _, ok := matcher.Key.(omegaMatcher); ok { + str += " matching" + } else if _, ok := matcher.Value.(omegaMatcher); ok { + str += " matching" + } + + expect := make(map[interface{}]interface{}, 1) + expect[matcher.Key] = matcher.Value + return format.Message(actual, str, expect) +} + +func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + kStr := "not to have key" + if _, ok := matcher.Key.(omegaMatcher); ok { + kStr = "not to have key matching" + } + + vStr := "or that key's value not be" + if _, ok := matcher.Value.(omegaMatcher); ok { + vStr = "or to have that key's value not matching" + } + + return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value) +} diff --git 
a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go new file mode 100644 index 000000000..ee4276189 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveLenMatcher struct { + Count int +} + +func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go new file mode 100644 index 000000000..bef00ae21 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveOccurredMatcher struct { +} + +func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) { + // is purely nil? + if actual == nil { + return false, nil + } + + // must be an 'error' type + if !isError(actual) { + return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1)) + } + + // must be non-nil (or a pointer to a non-nil) + return !isNil(actual), nil +} + +func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1)) +} + +func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred") +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go new file mode 100644 index 000000000..1d8e80270 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go @@ -0,0 +1,36 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HavePrefixMatcher struct { + Prefix string + Args []interface{} +} + +func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + prefix := matcher.prefix() + return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil +} + +func (matcher *HavePrefixMatcher) prefix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Prefix, matcher.Args...) 
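+		// (Annotation: Prefix doubles as a fmt format string whenever Args are
+		// supplied; e.g. HavePrefix("v%d.", 2) matches "v2.0.1". Illustrative
+		// example, not part of the vendored source.)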
+ } + return matcher.Prefix +} + +func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have prefix", matcher.prefix()) +} + +func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have prefix", matcher.prefix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go new file mode 100644 index 000000000..40a3526eb --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go @@ -0,0 +1,36 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveSuffixMatcher struct { + Suffix string + Args []interface{} +} + +func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + suffix := matcher.suffix() + return len(actualString) >= len(suffix) && actualString[len(actualString)-len(suffix):] == suffix, nil +} + +func (matcher *HaveSuffixMatcher) suffix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Suffix, matcher.Args...) + } + return matcher.Suffix +} + +func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have suffix", matcher.suffix()) +} + +func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have suffix", matcher.suffix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go new file mode 100644 index 000000000..07499ac95 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -0,0 +1,51 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type MatchErrorMatcher struct { + Expected interface{} +} + +func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) { + if isNil(actual) { + return false, fmt.Errorf("Expected an error, got nil") + } + + if !isError(actual) { + return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) + } + + actualErr := actual.(error) + + if isError(matcher.Expected) { + return reflect.DeepEqual(actualErr, matcher.Expected), nil + } + + if isString(matcher.Expected) { + return actualErr.Error() == matcher.Expected, nil + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + if matcher.Expected != nil { + subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher) + if hasSubMatcher { + return subMatcher.Match(actualErr.Error()) + } + } + + return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings. 
Got:\n%s", format.Object(matcher.Expected, 1)) +} + +func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match error", matcher.Expected) +} + +func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match error", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go new file mode 100644 index 000000000..f962f139f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go @@ -0,0 +1,65 @@ +package matchers + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/onsi/gomega/format" +) + +type MatchJSONMatcher struct { + JSONToMatch interface{} + firstFailurePath []interface{} +} + +func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.prettyPrint(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + // this is guarded by prettyPrint + json.Unmarshal([]byte(actualString), &aval) + json.Unmarshal([]byte(expectedString), &eval) + var equal bool + equal, matcher.firstFailurePath = deepEqual(aval, eval) + return equal, nil +} + +func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.JSONToMatch) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n%s", format.Object(matcher.JSONToMatch, 1)) + } + + abuf := new(bytes.Buffer) + ebuf := new(bytes.Buffer) + + if err := json.Indent(abuf, []byte(actualString), "", " "); err != nil { + return "", "", fmt.Errorf("Actual '%s' should be valid JSON, but it is not.\nUnderlying error:%s", actualString, err) + } + + if err := json.Indent(ebuf, []byte(expectedString), "", " "); err != nil { + return "", "", fmt.Errorf("Expected '%s' should be valid JSON, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return abuf.String(), ebuf.String(), nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go new file mode 100644 index 000000000..adac5db6b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go @@ -0,0 +1,43 @@ +package matchers + +import ( + "fmt" + "regexp" + + "github.com/onsi/gomega/format" +) + +type MatchRegexpMatcher struct { + Regexp string + Args []interface{} +} + +func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1)) + } + + match, err := regexp.Match(matcher.regexp(), []byte(actualString)) + if err != nil { + return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error()) + } + + return match, nil +} + +func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) regexp() string { + re := matcher.Regexp + if len(matcher.Args) > 0 { + re = fmt.Sprintf(matcher.Regexp, matcher.Args...) 
+ } + return re +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go new file mode 100644 index 000000000..5c815f5af --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go @@ -0,0 +1,134 @@ +package matchers + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + + "github.com/onsi/gomega/format" + "golang.org/x/net/html/charset" +) + +type MatchXMLMatcher struct { + XMLToMatch interface{} +} + +func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.formattedPrint(actual) + if err != nil { + return false, err + } + + aval, err := parseXmlContent(actualString) + if err != nil { + return false, fmt.Errorf("Actual '%s' should be valid XML, but it is not.\nUnderlying error:%s", actualString, err) + } + + eval, err := parseXmlContent(expectedString) + if err != nil { + return false, fmt.Errorf("Expected '%s' should be valid XML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) { + var ok bool + actualString, ok = toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok = toString(matcher.XMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.XMLToMatch, 1)) + } + return actualString, expectedString, nil +} + +func parseXmlContent(content string) (*xmlNode, error) { + allNodes := []*xmlNode{} + + dec := newXmlDecoder(strings.NewReader(content)) + for { + tok, err := dec.Token() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("failed to decode next token: %v", err) // untested section + } + + lastNodeIndex := len(allNodes) - 1 + var lastNode *xmlNode + if len(allNodes) > 0 { + lastNode = allNodes[lastNodeIndex] + } else { + lastNode = &xmlNode{} + } + + switch tok := tok.(type) { + case xml.StartElement: + attrs := attributesSlice(tok.Attr) + sort.Sort(attrs) + allNodes = append(allNodes, &xmlNode{XMLName: tok.Name, XMLAttr: tok.Attr}) + case xml.EndElement: + if len(allNodes) > 1 { + allNodes[lastNodeIndex-1].Nodes = append(allNodes[lastNodeIndex-1].Nodes, lastNode) + allNodes = allNodes[:lastNodeIndex] + } + case xml.CharData: + lastNode.Content = append(lastNode.Content, tok.Copy()...) 
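+		// (Annotation: tok.Copy() matters here: the decoder reuses the CharData
+		// buffer on the next Token call, so the bytes must be copied to survive
+		// past this iteration.)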
+ case xml.Comment: + lastNode.Comments = append(lastNode.Comments, tok.Copy()) // untested section + case xml.ProcInst: + lastNode.ProcInsts = append(lastNode.ProcInsts, tok.Copy()) + } + } + + if len(allNodes) == 0 { + return nil, errors.New("found no nodes") + } + firstNode := allNodes[0] + trimParentNodesContentSpaces(firstNode) + + return firstNode, nil +} + +func newXmlDecoder(reader io.Reader) *xml.Decoder { + dec := xml.NewDecoder(reader) + dec.CharsetReader = charset.NewReaderLabel + return dec +} + +func trimParentNodesContentSpaces(node *xmlNode) { + if len(node.Nodes) > 0 { + node.Content = bytes.TrimSpace(node.Content) + for _, childNode := range node.Nodes { + trimParentNodesContentSpaces(childNode) + } + } +} + +type xmlNode struct { + XMLName xml.Name + Comments []xml.Comment + ProcInsts []xml.ProcInst + XMLAttr []xml.Attr + Content []byte + Nodes []*xmlNode +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go new file mode 100644 index 000000000..0c83c2b63 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -0,0 +1,76 @@ +package matchers + +import ( + "fmt" + "strings" + + "github.com/onsi/gomega/format" + "gopkg.in/yaml.v2" +) + +type MatchYAMLMatcher struct { + YAMLToMatch interface{} + firstFailurePath []interface{} +} + +func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil { + return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err) + } + if err := yaml.Unmarshal([]byte(expectedString), &eval); err != nil { + return false, fmt.Errorf("Expected '%s' should be valid YAML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + var equal bool + equal, matcher.firstFailurePath = deepEqual(aval, eval) + return equal, nil +} + +func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + return normalise(actualString), normalise(expectedString), err +} + +func normalise(input string) string { + var val interface{} + err := yaml.Unmarshal([]byte(input), &val) + if err != nil { + panic(err) // unreachable since Match already calls Unmarshal + } + output, err := yaml.Marshal(val) + if err != nil { + panic(err) // untested section, unreachable since we Unmarshal above + } + return strings.TrimSpace(string(output)) +} + +func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchYAMLMatcher 
matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.YAMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.YAMLToMatch, 1)) + } + + return actualString, expectedString, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go new file mode 100644 index 000000000..2c91670bd --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -0,0 +1,30 @@ +package matchers + +import ( + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type NotMatcher struct { + Matcher types.GomegaMatcher +} + +func (m *NotMatcher) Match(actual interface{}) (bool, error) { + success, err := m.Matcher.Match(actual) + if err != nil { + return false, err + } + return !success, nil +} + +func (m *NotMatcher) FailureMessage(actual interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.Matcher.FailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value +} diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go new file mode 100644 index 000000000..3bf799800 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -0,0 +1,67 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type OrMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstSuccessfulMatcher types.GomegaMatcher +} + +func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { + m.firstSuccessfulMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if err != nil { + return false, err + } + if success { + m.firstSuccessfulMatcher = matcher + return true, nil + } + } + return false, nil +} + +func (m *OrMatcher) FailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers)) +} + +func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.firstSuccessfulMatcher.NegatedFailureMessage(actual) +} + +func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: F, T, => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become F + + Match eval: F, F, F => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to T. + */ + + if m.firstSuccessfulMatcher != nil { + // one of the matchers succeeded.. it must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual) + } else { + // so all matchers failed.. Any one of them changing would change the result. 
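+			// (Annotation: e.g. Or(BeClosed(), Receive()) against an open, empty
+			// channel fails now, but either branch could still start succeeding,
+			// so the overall result may change. Illustrative example only.)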
+ for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + return true + } + } + return false // none of were going to change + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go new file mode 100644 index 000000000..640f4db1a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -0,0 +1,46 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type PanicMatcher struct { + object interface{} +} + +func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return false, fmt.Errorf("PanicMatcher expects a non-nil actual.") + } + + actualType := reflect.TypeOf(actual) + if actualType.Kind() != reflect.Func { + return false, fmt.Errorf("PanicMatcher expects a function. Got:\n%s", format.Object(actual, 1)) + } + if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) { + return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value. Got:\n%s", format.Object(actual, 1)) + } + + success = false + defer func() { + if e := recover(); e != nil { + matcher.object = e + success = true + } + }() + + reflect.ValueOf(actual).Call([]reflect.Value{}) + + return +} + +func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to panic") +} + +func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1))) +} diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go new file mode 100644 index 000000000..2018a6128 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -0,0 +1,128 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ReceiveMatcher struct { + Arg interface{} + receivedValue reflect.Value + channelClosed bool +} + +func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel. 
Got:\n%s", format.Object(actual, 1)) + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + + if matcher.Arg != nil { + subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + if !hasSubMatcher { + argType := reflect.TypeOf(matcher.Arg) + if argType.Kind() != reflect.Ptr { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + } + } + + winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ + {Dir: reflect.SelectRecv, Chan: channelValue}, + {Dir: reflect.SelectDefault}, + }) + + var closed bool + var didReceive bool + if winnerIndex == 0 { + closed = !open + didReceive = open + } + matcher.channelClosed = closed + + if closed { + return false, nil + } + + if hasSubMatcher { + if didReceive { + matcher.receivedValue = value + return subMatcher.Match(matcher.receivedValue.Interface()) + } + return false, nil + } + + if didReceive { + if matcher.Arg != nil { + outValue := reflect.ValueOf(matcher.Arg) + + if value.Type().AssignableTo(outValue.Elem().Type()) { + outValue.Elem().Set(value) + return true, nil + } + if value.Type().Kind() == reflect.Interface && value.Elem().Type().AssignableTo(outValue.Elem().Type()) { + outValue.Elem().Set(value.Elem()) + return true, nil + } else { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1)) + } + + } + + return true, nil + } + return false, nil +} + +func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.FailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } + return format.Message(actual, "to receive something."+closedAddendum) +} + +func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." 
+ } + return format.Message(actual, "not to receive anything."+closedAddendum) +} + +func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go new file mode 100644 index 000000000..639295684 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go @@ -0,0 +1,92 @@ +package matchers + +import ( + "fmt" + "reflect" + "strings" +) + +func formattedMessage(comparisonMessage string, failurePath []interface{}) string { + var diffMessage string + if len(failurePath) == 0 { + diffMessage = "" + } else { + diffMessage = fmt.Sprintf("\n\nfirst mismatched key: %s", formattedFailurePath(failurePath)) + } + return fmt.Sprintf("%s%s", comparisonMessage, diffMessage) +} + +func formattedFailurePath(failurePath []interface{}) string { + formattedPaths := []string{} + for i := len(failurePath) - 1; i >= 0; i-- { + switch p := failurePath[i].(type) { + case int: + formattedPaths = append(formattedPaths, fmt.Sprintf(`[%d]`, p)) + default: + if i != len(failurePath)-1 { + formattedPaths = append(formattedPaths, ".") + } + formattedPaths = append(formattedPaths, fmt.Sprintf(`"%s"`, p)) + } + } + return strings.Join(formattedPaths, "") +} + +func deepEqual(a interface{}, b interface{}) (bool, []interface{}) { + var errorPath []interface{} + if reflect.TypeOf(a) != reflect.TypeOf(b) { + return false, errorPath + } + + switch a.(type) { + case []interface{}: + if len(a.([]interface{})) != len(b.([]interface{})) { + return false, errorPath + } + + for i, v := range a.([]interface{}) { + elementEqual, keyPath := deepEqual(v, b.([]interface{})[i]) + if !elementEqual { + return false, append(keyPath, i) + } + } + return true, errorPath + + case map[interface{}]interface{}: + if len(a.(map[interface{}]interface{})) != len(b.(map[interface{}]interface{})) { + return false, errorPath + } + + for k, v1 := range a.(map[interface{}]interface{}) { + v2, ok := b.(map[interface{}]interface{})[k] + if !ok { + return false, errorPath + } + elementEqual, keyPath := deepEqual(v1, v2) + if !elementEqual { + return false, append(keyPath, k) + } + } + return true, errorPath + + case map[string]interface{}: + if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) { + return false, errorPath + } + + for k, v1 := range a.(map[string]interface{}) { + v2, ok := b.(map[string]interface{})[k] + if !ok { + return false, errorPath + } + elementEqual, keyPath := deepEqual(v1, v2) + if !elementEqual { + return false, append(keyPath, k) + } + } + return true, errorPath + + default: + return a == b, errorPath + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go new file mode 100644 index 000000000..721ed5529 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type SucceedMatcher struct { +} + +func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) { + // is purely nil? + if actual == nil { + return true, nil + } + + // must be an 'error' type + if !isError(actual) { + return false, fmt.Errorf("Expected an error-type. 
Got:\n%s", format.Object(actual, 1)) + } + + // must be nil (or a pointer to a nil) + return isNil(actual), nil +} + +func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1)) +} + +func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return "Expected failure, but got no error." +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go new file mode 100644 index 000000000..8aaf8759d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -0,0 +1,41 @@ +package bipartitegraph + +import "errors" +import "fmt" + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . "github.com/onsi/gomega/matchers/support/goraph/edge" + +type BipartiteGraph struct { + Left NodeOrderedSet + Right NodeOrderedSet + Edges EdgeSet +} + +func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { + left := NodeOrderedSet{} + for i := range leftValues { + left = append(left, Node{Id: i}) + } + + right := NodeOrderedSet{} + for j := range rightValues { + right = append(right, Node{Id: j + len(left)}) + } + + edges := EdgeSet{} + for i, leftValue := range leftValues { + for j, rightValue := range rightValues { + neighbours, err := neighbours(leftValue, rightValue) + if err != nil { + return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())) + } + + if neighbours { + edges = append(edges, Edge{Node1: left[i], Node2: right[j]}) + } + } + } + + return &BipartiteGraph{left, right, edges}, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go new file mode 100644 index 000000000..8181f43a4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -0,0 +1,159 @@ +package bipartitegraph + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . 
"github.com/onsi/gomega/matchers/support/goraph/edge" +import "github.com/onsi/gomega/matchers/support/goraph/util" + +func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) { + paths := bg.maximalDisjointSLAPCollection(matching) + + for len(paths) > 0 { + for _, path := range paths { + matching = matching.SymmetricDifference(path) + } + paths = bg.maximalDisjointSLAPCollection(matching) + } + + return +} + +func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) { + guideLayers := bg.createSLAPGuideLayers(matching) + if len(guideLayers) == 0 { + return + } + + used := make(map[Node]bool) + + for _, u := range guideLayers[len(guideLayers)-1] { + slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used) + if found { + for _, edge := range slap { + used[edge.Node1] = true + used[edge.Node2] = true + } + result = append(result, slap) + } + } + + return +} + +func (bg *BipartiteGraph) findDisjointSLAP( + start Node, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) ([]Edge, bool) { + return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used) +} + +func (bg *BipartiteGraph) findDisjointSLAPHelper( + currentNode Node, + currentSLAP EdgeSet, + currentLevel int, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) (EdgeSet, bool) { + used[currentNode] = true + + if currentLevel == 0 { + return currentSLAP, true + } + + for _, nextNode := range guideLayers[currentLevel-1] { + if used[nextNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(currentNode, nextNode) + if !found { + continue + } + + if matching.Contains(edge) == util.Odd(currentLevel) { + continue + } + + currentSLAP = append(currentSLAP, edge) + slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used) + if found { + return slap, true + } + currentSLAP = currentSLAP[:len(currentSLAP)-1] + } + + used[currentNode] = false + return nil, false +} + +func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) { + used := make(map[Node]bool) + currentLayer := NodeOrderedSet{} + + for _, node := range bg.Left { + if matching.Free(node) { + used[node] = true + currentLayer = append(currentLayer, node) + } + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } + guideLayers = append(guideLayers, currentLayer) + + done := false + + for !done { + lastLayer := currentLayer + currentLayer = NodeOrderedSet{} + + if util.Odd(len(guideLayers)) { + for _, leftNode := range lastLayer { + for _, rightNode := range bg.Right { + if used[rightNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, rightNode) + used[rightNode] = true + + if matching.Free(rightNode) { + done = true + } + } + } + } else { + for _, rightNode := range lastLayer { + for _, leftNode := range bg.Left { + if used[leftNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || !matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, leftNode) + used[leftNode] = true + } + } + + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } + guideLayers = append(guideLayers, currentLayer) + } + + return +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go 
b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go new file mode 100644 index 000000000..4fd15cc06 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go @@ -0,0 +1,61 @@ +package edge + +import . "github.com/onsi/gomega/matchers/support/goraph/node" + +type Edge struct { + Node1 Node + Node2 Node +} + +type EdgeSet []Edge + +func (ec EdgeSet) Free(node Node) bool { + for _, e := range ec { + if e.Node1 == node || e.Node2 == node { + return false + } + } + + return true +} + +func (ec EdgeSet) Contains(edge Edge) bool { + for _, e := range ec { + if e == edge { + return true + } + } + + return false +} + +func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { + for _, e := range ec { + if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) { + return e, true + } + } + + return Edge{}, false +} + +func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet { + edgesToInclude := make(map[Edge]bool) + + for _, e := range ec { + edgesToInclude[e] = true + } + + for _, e := range ec2 { + edgesToInclude[e] = !edgesToInclude[e] + } + + result := EdgeSet{} + for e, include := range edgesToInclude { + if include { + result = append(result, e) + } + } + + return result +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go new file mode 100644 index 000000000..800c2ea8c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go @@ -0,0 +1,7 @@ +package node + +type Node struct { + Id int +} + +type NodeOrderedSet []Node diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go new file mode 100644 index 000000000..d76a1ee00 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go @@ -0,0 +1,7 @@ +package util + +import "math" + +func Odd(n int) bool { + return math.Mod(float64(n), 2.0) == 1.0 +} diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go new file mode 100644 index 000000000..75afcd844 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -0,0 +1,179 @@ +/* +Gomega matchers + +This package implements the Gomega matchers and does not typically need to be imported. 
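+(These matchers are normally constructed through the top-level gomega package,
+e.g. gomega.Equal or gomega.ContainElement, and consumed via Expect/Eventually.)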
+See the docs for Gomega for documentation on the matchers + +http://onsi.github.io/gomega/ +*/ +package matchers + +import ( + "encoding/json" + "fmt" + "reflect" +) + +type omegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} + +func isBool(a interface{}) bool { + return reflect.TypeOf(a).Kind() == reflect.Bool +} + +func isNumber(a interface{}) bool { + if a == nil { + return false + } + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Float64 +} + +func isInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Int64 +} + +func isUnsignedInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Uint <= kind && kind <= reflect.Uint64 +} + +func isFloat(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Float32 <= kind && kind <= reflect.Float64 +} + +func toInteger(a interface{}) int64 { + if isInteger(a) { + return reflect.ValueOf(a).Int() + } else if isUnsignedInteger(a) { + return int64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return int64(reflect.ValueOf(a).Float()) + } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) +} + +func toUnsignedInteger(a interface{}) uint64 { + if isInteger(a) { + return uint64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return reflect.ValueOf(a).Uint() + } else if isFloat(a) { + return uint64(reflect.ValueOf(a).Float()) + } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) +} + +func toFloat(a interface{}) float64 { + if isInteger(a) { + return float64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return float64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return reflect.ValueOf(a).Float() + } + panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) +} + +func isError(a interface{}) bool { + _, ok := a.(error) + return ok +} + +func isChan(a interface{}) bool { + if isNil(a) { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Chan +} + +func isMap(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Map +} + +func isArrayOrSlice(a interface{}) bool { + if a == nil { + return false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Slice: + return true + default: + return false + } +} + +func isString(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.String +} + +func toString(a interface{}) (string, bool) { + aString, isString := a.(string) + if isString { + return aString, true + } + + aBytes, isBytes := a.([]byte) + if isBytes { + return string(aBytes), true + } + + aStringer, isStringer := a.(fmt.Stringer) + if isStringer { + return aStringer.String(), true + } + + aJSONRawMessage, isJSONRawMessage := a.(json.RawMessage) + if isJSONRawMessage { + return string(aJSONRawMessage), true + } + + return "", false +} + +func lengthOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Len(), true + default: + return 0, false + } +} +func capOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Cap(), true + default: + return 0, false + } +} + +func isNil(a interface{}) bool { + if a == nil { + return true + } + + switch reflect.TypeOf(a).Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return reflect.ValueOf(a).IsNil() + } + + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go new file mode 100644 index 000000000..8e58d8a0f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -0,0 +1,72 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type WithTransformMatcher struct { + // input + Transform interface{} // must be a function of one parameter that returns one value + Matcher types.GomegaMatcher + + // cached value + transformArgType reflect.Type + + // state + transformedValue interface{} +} + +func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { + if transform == nil { + panic("transform function cannot be nil") + } + txType := reflect.TypeOf(transform) + if txType.NumIn() != 1 { + panic("transform function must have 1 argument") + } + if txType.NumOut() != 1 { + panic("transform function must have 1 return value") + } + + return &WithTransformMatcher{ + Transform: transform, + Matcher: matcher, + transformArgType: reflect.TypeOf(transform).In(0), + } +} + +func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { + // return error if actual's type is incompatible with Transform function's argument type + actualType := reflect.TypeOf(actual) + if !actualType.AssignableTo(m.transformArgType) { + return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType) + } + + // call the Transform function with `actual` + fn := 
reflect.ValueOf(m.Transform) + result := fn.Call([]reflect.Value{reflect.ValueOf(actual)}) + m.transformedValue = result[0].Interface() // expect exactly one value + + return m.Matcher.Match(m.transformedValue) +} + +func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) { + return m.Matcher.FailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { + // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.) + // + // Querying the next matcher is fine if the transformer always will return the same value. + // But if the transformer is non-deterministic and returns a different value each time, then there + // is no point in querying the next matcher, since it can only comment on the last transformed value. + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) +} diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go new file mode 100644 index 000000000..ac59a3a5a --- /dev/null +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -0,0 +1,26 @@ +package types + +type TWithHelper interface { + Helper() +} + +type GomegaFailHandler func(message string, callerSkip ...int) + +type GomegaFailWrapper struct { + Fail GomegaFailHandler + TWithHelper TWithHelper +} + +//A simple *testing.T interface wrapper +type GomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +//All Gomega matchers must implement the GomegaMatcher interface +// +//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers +type GomegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index d4b92663b..000000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e75..000000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. 
- -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 6483ba2af..000000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Contributing - -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. 
- -Before proposing a change, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade0..000000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 7421f326f..000000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,282 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. 
-// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. 
-func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 2874a048c..000000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,147 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strings" -) - -// Frame represents a program counter inside a stack frame. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - fmt.Fprintf(s, "%d", f.line()) - case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). 
-type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - fmt.Fprintf(s, "%v", []Frame(st)) - } - case 's': - fmt.Fprintf(s, "%s", []Frame(st)) - } -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index 0637db726..000000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go - -go: - - 1.9 - - "1.10" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build - - go test -race -v ./... - diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e266..000000000 --- a/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md deleted file mode 100644 index 0c9b04b53..000000000 --- a/vendor/github.com/spf13/afero/README.md +++ /dev/null @@ -1,452 +0,0 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) - -A FileSystem Abstraction System for Go - -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -# Overview - -Afero is an filesystem framework providing a simple, uniform and universal API -interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. - -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power -and benefit of the os and ioutil packages. - -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. - -It is suitable for use in a any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability. 
- - -## Afero Features - -* A single consistent API for accessing a variety of filesystems -* Interoperation between a variety of file system types -* A set of interfaces to encourage and enforce interoperability between backends -* An atomic cross platform memory backed file system -* Support for compositional (union) file systems by combining multiple file systems acting as one -* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) -* A set of utility functions ported from io, ioutil & hugo to be afero aware - - -# Using Afero - -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define you own file system. -* Wrap for the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero - -Next include Afero in your application. -```go -import "github.com/spf13/afero" -``` - -## Step 2: Declare a backend - -First define a package variable and set it to a pointer to a filesystem. -```go -var AppFs = afero.NewMemMapFs() - -or - -var AppFs = afero.NewOsFs() -``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. - -## Step 3: Use it like you would the OS package - -Throughout your application use any function and method like you normally -would. - -So if my application before had: -```go -os.Open('/tmp/foo') -``` -We would replace it with: -```go -AppFs.Open('/tmp/foo') -``` - -`AppFs` being the variable we defined above. - - -## List of all available functions - -File System Methods Available: -```go -Chmod(name string, mode os.FileMode) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error -``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. - -## Using Afero's utility functions - -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil with some developed for Hugo. - -The afero utilities support all afero compatible backends. 
- -The list of utilities includes: - -```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) - -They are available under two different approaches to use. You can either call -them directly where the first parameter of each function will be the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. - -### Calling utilities directly - -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs,"", "ioutil-test") - -``` - -### Calling via Afero - -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` - -## Using Afero for Testing - -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and can be easily -reproducible regardless of OS. You could create files to your heart’s content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. - -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far more easier to do -* No test cleanup needed - -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs() during testing you -can set it to afero.NewMemMapFs(). - -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that Tests are order -independent, with no test relying on the state left by an earlier test. - -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} -``` - -# Available Backends - -## Operating System Native - -### OsFs - -The first is simply a wrapper around the native OS calls. This makes it -very easy to use as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed. 
- -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755)) -``` - -## Memory Backed Storage - -### MemMapFs - -Afero also provides a fully atomic memory backed filesystem perfect for use in -mocking and to speed up unnecessary disk io when persistence isn’t -necessary. It is fully concurrent and will work within go routines -safely. - -```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755)) -``` - -#### InMemoryFile - -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. - -## Network Interfaces - -### SftpFs - -Afero has experimental support for secure file transfer protocol (sftp). Which can -be used to perform file operations over a encrypted channel. - -## Filtering Backends - -### BasePathFs - -The BasePathFs restricts all operations to a given path within an Fs. -The given file name to the operations on this Fs will be prepended with -the base path before calling the source Fs. - -```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` - -### ReadOnlyFs - -A thin wrapper around the source Fs providing a read only view. - -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM -``` - -# RegexpFs - -A filtered view on file names, any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. - -```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` - -### HttpFs - -Afero provides an http compatible backend which can wrap any of the existing -backends. - -The Http package requires a slightly specific version of Open which -returns an http.File type. - -Afero provides an httpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an httpFs. - -```go -httpFs := afero.NewHttpFs() -fileserver := http.FileServer(httpFs.Dir())) -http.Handle("/", fileserver) -``` - -## Composite Backends - -Afero provides the ability have two filesystems (or more) act as a single -file system. - -### CacheOnReadFs - -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. Subsequent reads will be pulled from the overlay -directly permitting the request is within the cache duration of when it was -created in the overlay. - -If the base filesystem is writeable, any changes to files will be -done first to the base, then to the overlay layer. Write calls to open file -handles like `Write()` or `Truncate()` to the overlay first. - -To writing files to the overlay only, you can use the overlay Fs directly (not -via the union Fs). - -Cache files in the layer for the given time.Duration, a cache duration of 0 -means "forever" meaning the file will not be re-requested from the base ever. - -A read-only base will make the overlay also read-only but still copy files -from the base to the overlay when they're not present (or outdated) in the -caching layer. - -```go -base := afero.NewOsFs() -layer := afero.NewMemMapFs() -ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) -``` - -### CopyOnWriteFs() - -The CopyOnWriteFs is a read only base file system with a potentially -writeable layer on top. 
- -Read operations will first look in the overlay and if not found there, will -serve the file from the base. - -Changes to the file system will only be made in the overlay. - -Any attempt to modify a file found only in the base will copy the file to the -overlay layer before modification (including opening a file with a writable -handle). - -Removing and Renaming files present only in the base layer is not currently -permitted. If a file is present in the base layer and the overlay, only the -overlay will be removed/renamed. - -```go - base := afero.NewOsFs() - roBase := afero.NewReadOnlyFs(base) - ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) - - fh, _ = ufs.Create("/home/test/file2.txt") - fh.WriteString("This is a test") - fh.Close() -``` - -In this example all write operations will only occur in memory (MemMapFs) -leaving the base filesystem (OsFs) untouched. - - -## Desired/possible backends - -The following is a short list of possible backends we hope someone will -implement: - -* SSH -* ZIP -* TAR -* S3 - -# About the project - -## What's in the name - -Afero comes from the latin roots Ad-Facere. - -**"Ad"** is a prefix meaning "to". - -**"Facere"** is a form of the root "faciō" making "make or do". - -The literal meaning of afero is "to make" or "to do" which seems very fitting -for a library that allows one to make files and directories and do things with them. - -The English word that shares the same roots as Afero is "affair". Affair shares -the same concept but as a noun it means "something that is made or done" or "an -object of a particular type". - -It's also nice that unlike some of my other libraries (hugo, cobra, viper) it -Googles very well. - -## Release Notes - -* **0.10.0** 2015.12.10 - * Full compatibility with Windows - * Introduction of afero utilities - * Test suite rewritten to work cross platform - * Normalize paths for MemMapFs - * Adding Sync to the file interface - * **Breaking Change** Walk and ReadDir have changed parameter order - * Moving types used by MemMapFs to a subpackage - * General bugfixes and improvements -* **0.9.0** 2015.11.05 - * New Walk function similar to filepath.Walk - * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC - * MemMapFs.Remove now really deletes the file - * InMemoryFile.Readdir and Readdirnames work correctly - * InMemoryFile functions lock it for concurrent access - * Test suite improvements -* **0.8.0** 2014.10.28 - * First public version - * Interfaces feel ready for people to build using - * Interfaces satisfy all known uses - * MemMapFs passes the majority of the OS test suite - * OsFs passes the majority of the OS test suite - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) - -## License - -Afero is released under the Apache 2.0 license. See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go deleted file mode 100644 index f5b5e127c..000000000 --- a/vendor/github.com/spf13/afero/afero.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright © 2014 Steve Francia . 
-// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package afero provides types and methods for interacting with the filesystem, -// as an abstraction layer. - -// Afero also provides a few implementations that are mostly interoperable. One that -// uses the operating system filesystem, one that uses memory to store files -// (cross platform) and an interface that should be implemented if you want to -// provide your own filesystem. - -package afero - -import ( - "errors" - "io" - "os" - "time" -) - -type Afero struct { - Fs -} - -// File represents a file in the filesystem. -type File interface { - io.Closer - io.Reader - io.ReaderAt - io.Seeker - io.Writer - io.WriterAt - - Name() string - Readdir(count int) ([]os.FileInfo, error) - Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) - Sync() error - Truncate(size int64) error - WriteString(s string) (ret int, err error) -} - -// Fs is the filesystem interface. -// -// Any simulated or real filesystem should implement this interface. -type Fs interface { - // Create creates a file in the filesystem, returning the file and an - // error, if any happens. - Create(name string) (File, error) - - // Mkdir creates a directory in the filesystem, return an error if any - // happens. - Mkdir(name string, perm os.FileMode) error - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(path string, perm os.FileMode) error - - // Open opens a file, returning it or an error, if any happens. - Open(name string) (File, error) - - // OpenFile opens a file using the given flags and the given mode. - OpenFile(name string, flag int, perm os.FileMode) (File, error) - - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(name string) error - - // RemoveAll removes a directory path and any children it contains. It - // does not fail if the path does not exist (return nil). - RemoveAll(path string) error - - // Rename renames a file. - Rename(oldname, newname string) error - - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(name string) (os.FileInfo, error) - - // The name of this FileSystem - Name() string - - //Chmod changes the mode of the named file to mode. 
- Chmod(name string, mode os.FileMode) error - - //Chtimes changes the access and modification times of the named file - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml deleted file mode 100644 index a633ad500..000000000 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: '{build}' -clone_folder: C:\gopath\src\github.com\spf13\afero -environment: - GOPATH: C:\gopath -build_script: -- cmd: >- - go version - - go env - - go get -v github.com/spf13/afero/... - - go build github.com/spf13/afero -test_script: -- cmd: go test -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go deleted file mode 100644 index 616ff8ff7..000000000 --- a/vendor/github.com/spf13/afero/basepath.go +++ /dev/null @@ -1,180 +0,0 @@ -package afero - -import ( - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var _ Lstater = (*BasePathFs)(nil) - -// The BasePathFs restricts all operations to a given path within an Fs. -// The given file name to the operations on this Fs will be prepended with -// the base path before calling the base Fs. -// Any file name (after filepath.Clean()) outside this base path will be -// treated as non existing file. -// -// Note that it does not clean the error messages on return, so you may -// reveal the real path on errors. -type BasePathFs struct { - source Fs - path string -} - -type BasePathFile struct { - File - path string -} - -func (f *BasePathFile) Name() string { - sourcename := f.File.Name() - return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) -} - -func NewBasePathFs(source Fs, path string) Fs { - return &BasePathFs{source: source, path: path} -} - -// on a file outside the base path it returns the given file name and an error, -// else the given file with the base path prepended -func (b *BasePathFs) RealPath(name string) (path string, err error) { - if err := validateBasePathName(name); err != nil { - return name, err - } - - bpath := filepath.Clean(b.path) - path = filepath.Clean(filepath.Join(bpath, name)) - if !strings.HasPrefix(path, bpath) { - return name, os.ErrNotExist - } - - return path, nil -} - -func validateBasePathName(name string) error { - if runtime.GOOS != "windows" { - // Not much to do here; - // the virtual file paths all look absolute on *nix. - return nil - } - - // On Windows a common mistake would be to provide an absolute OS path - // We could strip out the base part, but that would not be very portable. 
- if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return lstater.LstatIfPossible(name) - } - fi, err := b.source.Stat(name) - return fi, false, err -} - -// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go 
deleted file mode 100644 index 29a26c67d..000000000 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ /dev/null @@ -1,290 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -// If the cache duration is 0, cache time will be unlimited, i.e. once -// a file is in the layer, the base will never be read again for this file. -// -// For cache times greater than 0, the modification time of a file is -// checked. Note that a lot of file system implementations only allow a -// resolution of a second for timestamps... or as the godoc for os.Chtimes() -// states: "The underlying filesystem may truncate or round the values to a -// less precise time unit." -// -// This caching union will forward all write calls also to the base file -// system first. To prevent writing to the base Fs, wrap it in a read-only -// filter - Note: this will also make the overlay read-only, for writing files -// in the overlay, use the overlay Fs directly, not via the union Fs. -type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return u.base.Stat(name) - default: // cacheStale has base, 
cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err := u.copyToLayer(oldname); err != nil { - return err - } - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyToLayer(name); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? 
then we have to - // remember if the file did not exist before - bfh.Close() - return nil, err - } - return &UnionFile{Base: bfh, Layer: lfh}, nil -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go deleted file mode 100644 index 5728243d9..000000000 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build darwin openbsd freebsd netbsd dragonfly - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go deleted file mode 100644 index 968fc2783..000000000 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go deleted file mode 100644 index e8108a851..000000000 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ /dev/null @@ -1,293 +0,0 @@ -package afero - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" -) - -var _ Lstater = (*CopyOnWriteFs)(nil) - -// The CopyOnWriteFs is a union filesystem: a read only base file system with -// a possibly writeable layer on top. Changes to the file system will only -// be made in the overlay: Changing an existing file in the base layer which -// is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes() and Chmod()). -// -// Reading directories is currently only supported via Open(), not OpenFile(). 
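For orientation, here is a minimal sketch of how the two union filesystems being removed compose, using only constructors that appear in this diff (NewOsFs, NewMemMapFs, NewReadOnlyFs, NewCacheOnReadFs, NewCopyOnWriteFs). Treat it as an illustration of the layering described in the comments above, not as code from this repository:

package main

import (
	"time"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewOsFs()      // authoritative, possibly slow layer
	cache := afero.NewMemMapFs() // fast in-memory layer

	// Read-through cache: entries older than a minute are re-checked
	// against the base; a cacheTime of 0 would cache forever.
	cached := afero.NewCacheOnReadFs(base, cache, time.Minute)

	// Copy-on-write overlay: wrap the base read-only so writes can only
	// land in the overlay, as the CacheOnReadFs comment recommends.
	cow := afero.NewCopyOnWriteFs(afero.NewReadOnlyFs(base), afero.NewMemMapFs())

	_, _ = cached, cow
}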
-type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. 
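The copy-up rule above can be exercised end to end in a small self-contained sketch (in-memory layers, hypothetical path): modifying a base-only file copies it into the overlay first, so the base is never written, while Rename or Remove of a base-only file fails with EPERM as documented.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	layer := afero.NewMemMapFs()
	afero.WriteFile(base, "/etc/motd", []byte("hello"), 0644)

	ufs := afero.NewCopyOnWriteFs(base, layer)

	// Chmod on a base-only file triggers copyToLayer, so the base stays
	// untouched and the overlay carries the new mode.
	if err := ufs.Chmod("/etc/motd", 0600); err != nil {
		panic(err)
	}
	fi, _ := layer.Stat("/etc/motd")
	fmt.Println(fi.Mode()) // -rw-------
}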
-func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return ErrFileExists - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - // This is in line with how os.MkdirAll behaves. - return nil - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) -} diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod deleted file mode 100644 index 086855099..000000000 --- a/vendor/github.com/spf13/afero/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/spf13/afero - -require golang.org/x/text v0.3.0 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum deleted file mode 100644 index 6bad37b2a..000000000 --- a/vendor/github.com/spf13/afero/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index c42193688..000000000 --- a/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index 5c3a3d8ff..000000000 --- a/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. 
Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. -func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. -func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *File. 
-// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func (a Afero) TempFile(dir, prefix string) (f File, err error) { - return TempFile(a.Fs, dir, prefix) -} - -func TempFile(fs Fs, dir, prefix string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextSuffix()) - err = fs.Mkdir(try, 0700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0a..000000000 --- a/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. -type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index c18a87fb7..000000000 --- a/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. 
- -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. -func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by a ll filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.IndexAny(path, "*?[") >= 0 -} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f4..000000000 --- a/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5b..000000000 --- a/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 7af2fb56f..000000000 --- a/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
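As a concrete illustration of the DirMap index removed above (a standalone sketch, not code from this diff): entries are keyed by full name, and Files() returns them sorted via filesSorter, which is what keeps in-memory Readdir results deterministic.

package main

import (
	"fmt"

	"github.com/spf13/afero/mem"
)

func main() {
	d := mem.DirMap{}
	d.Add(mem.CreateFile("/tmp/b.txt"))
	d.Add(mem.CreateFile("/tmp/a.txt"))

	// Files() sorts by name, so iteration order is stable.
	for _, f := range d.Files() {
		fmt.Println(f.Name())
	}
	// Output:
	// /tmp/a.txt
	// /tmp/b.txt
}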
- -package mem - -import ( - "bytes" - "errors" - "io" - "os" - "path/filepath" - "sync" - "sync/atomic" -) - -import "time" - -const FilePathSeparator = string(filepath.Separator) - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} - } - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -func (f *File) Read(b []byte) (n int, err error) { - f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed == true { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return 
f.Read(b) -} - -func (f *File) Truncate(size int64) error { - if f.closed == true { - return ErrFileClosed - } - if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { - return 0, ErrFileClosed - } - switch whence { - case 0: - atomic.StoreInt64(&f.at, offset) - case 1: - atomic.AddInt64(&f.at, int64(offset)) - case 2: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) - } - setModTime(f.fileData, time.Now()) - - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index 09498e70f..000000000 --- a/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? - m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, 0777) - if err != nil { - //log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - //log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. 
- i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - m.mu.Unlock() - - m.Chmod(name, perm|os.ModeDir) - - return nil -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - chmod := false - file, err := m.openWrite(name) - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, os.SEEK_END) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - m.Chmod(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p, _ := range m.getData() { - if strings.HasPrefix(p, path) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := 
m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - m.unRegisterWithParent(oldname) - fileData := m.getData()[oldname] - delete(m.getData(), oldname) - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - m.registerWithParent(fileData) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - return nil -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} - -// func debugMemMapList(fs Fs) { -// if x, ok := fs.(*MemMapFs); ok { -// x.List() -// } -// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index 13cc1b84c..000000000 --- a/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. -// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). 
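Because OsFs and MemMapFs implement the same Fs interface, callers can be written against the interface and swapped at wiring time. A short sketch (hypothetical path) of the test-versus-production swap this enables:

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// Use afero.NewOsFs() in production; the in-memory Fs keeps tests hermetic.
	var fs afero.Fs = afero.NewMemMapFs()

	if err := afero.WriteFile(fs, "/cfg/app.yaml", []byte("debug: true\n"), 0644); err != nil {
		panic(err)
	}
	data, err := afero.ReadFile(fs, "/cfg/app.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data))
}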
-type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f6..000000000 --- a/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
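A short usage sketch for Walk (self-contained, in-memory fixture; the pruning works exactly like path/filepath.SkipDir):

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/src/main.go", nil, 0644)
	afero.WriteFile(fs, "/vendor/dep/dep.go", nil, 0644)

	_ = afero.Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err // surface Stat/Lstat failures and stop the walk
		}
		if info.IsDir() && info.Name() == "vendor" {
			return filepath.SkipDir // prune the whole subtree
		}
		fmt.Println(path)
		return nil
	})
}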
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index c6376ec37..000000000 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,80 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index 9d92dbc05..000000000 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,214 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed, all others get a ENOENT error ( -// "No such file or directory"). 
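A minimal sketch of that filter in use (in-memory source, hypothetical names): non-matching files become invisible, while directories still resolve so the tree can be traversed.

package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	src := afero.NewMemMapFs()
	afero.WriteFile(src, "/logs/app.log", []byte("ok"), 0644)
	afero.WriteFile(src, "/logs/app.tmp", []byte("scratch"), 0644)

	logsOnly := afero.NewRegexpFs(src, regexp.MustCompile(`\.log$`))

	if _, err := logsOnly.Stat("/logs/app.log"); err == nil {
		fmt.Println("matching file is visible")
	}
	if _, err := logsOnly.Stat("/logs/app.tmp"); err != nil {
		fmt.Println("filtered out:", err) // ENOENT
	}
}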
-// -type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { - var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, 
nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index eda96312d..000000000 --- a/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,320 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing. -// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
- _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. -// It takes the FileInfo slices from the layer and the base and returns a -// single view. -type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - var files = make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil - -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories. -// At the end of the directory view, the error is io.EOF if c > 0. -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) 
- } - - if c <= 0 && len(f.files) == 0 { - return f.files, nil - } - - if f.off >= len(f.files) { - return nil, io.EOF - } - - if c <= 0 { - return f.files[f.off:], nil - } - - if c > len(f.files) { - c = len(f.files) - } - - defer func() { f.off += c }() - return f.files[f.off:c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? - if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 4f253f481..000000000 --- a/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by os.Separator. 
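The copy-up helper above (copyToLayer) is what lets afero's copy-on-write union write through to an overlay while leaving the base untouched. As a quick illustration of that behavior via the exported API — a minimal sketch assuming afero's documented NewCopyOnWriteFs/WriteFile/ReadFile helpers; the printed outputs are illustrative:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	layer := afero.NewMemMapFs()
	_ = afero.WriteFile(base, "/cfg.txt", []byte("from base"), 0644)

	// Reads fall through to the base until a write copies the file up
	// into the layer (the copyToLayer step above).
	cow := afero.NewCopyOnWriteFs(base, layer)
	_ = afero.WriteFile(cow, "/cfg.txt", []byte("overridden"), 0644)

	got, _ := afero.ReadFile(cow, "/cfg.txt")
	fmt.Println(string(got)) // overridden

	orig, _ := afero.ReadFile(base, "/cfg.txt")
	fmt.Println(string(orig)) // from base -- the base layer is never modified
}
```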
-const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - if err != os.ErrExist { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. -func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize((subPath)) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice. 
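SafeWriteReader above differs from WriteReader only in refusing to overwrite an existing file. A short usage sketch, assuming the github.com/spf13/afero import path and an in-memory filesystem; the printed error text comes from the deleted code above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	a := afero.Afero{Fs: afero.NewMemMapFs()}

	// First write succeeds and creates the parent directory.
	if err := a.SafeWriteReader("/tmp/demo/out.txt", strings.NewReader("v1")); err != nil {
		fmt.Println("unexpected:", err)
	}
	// Second write is refused because the file already exists.
	if err := a.SafeWriteReader("/tmp/demo/out.txt", strings.NewReader("v2")); err != nil {
		fmt.Println("refused:", err) // refused: /tmp/demo/out.txt already exists
	}
}
```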
-func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContains reports whether any of the subslices is within r. -func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists. 
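readerContainsAny above keeps half of its buffer between reads so that a match spanning two reads is not missed. A self-contained sketch of the same overlap trick for a single needle — streamContains is a made-up helper for illustration, not part of afero:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// streamContains reports whether needle occurs anywhere in r, reading in
// fixed-size chunks. Keeping the last len(needle)-1 bytes of the previous
// chunk in the buffer catches matches that straddle a chunk boundary --
// the same idea as readerContainsAny's half-buffer shift.
func streamContains(r io.Reader, needle []byte) bool {
	if len(needle) == 0 {
		return false
	}
	keep := len(needle) - 1
	buf := make([]byte, keep+4096)
	filled := 0
	for {
		n, err := r.Read(buf[filled:])
		filled += n
		if bytes.Contains(buf[:filled], needle) {
			return true
		}
		if err != nil {
			return false
		}
		// Slide the tail down so a straddling match can still complete.
		if filled > keep {
			copy(buf, buf[filled-keep:filled])
			filled = keep
		}
	}
}

func main() {
	r := strings.NewReader(strings.Repeat("x", 5000) + "needle" + strings.Repeat("y", 5000))
	fmt.Println(streamContains(r, []byte("needle"))) // true
}
```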
-func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 000000000..cd0a8ac15 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". +// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go new file mode 100644 index 000000000..5d052781b --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/gen.go @@ -0,0 +1,712 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
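atom.go's String method above shows the packing scheme: each Atom stores an offset into one big string (atomText) in its upper bits and the name's length in its low 8 bits, so decoding is a single slice expression. A toy reconstruction with a made-up text constant, to make the packing concrete:

```go
package main

import "fmt"

// text stands in for the package's generated atomText blob.
const text = "abbrdivspan"

type atom uint32

// String decodes the packed code: offset in the high bits, length in the
// low byte -- the same layout as x/net/html/atom.Atom.
func (a atom) String() string {
	start := uint32(a >> 8)
	n := uint32(a & 0xff)
	if start+n > uint32(len(text)) {
		return ""
	}
	return text[start : start+n]
}

func main() {
	div := atom(4<<8 | 3) // offset 4, length 3 within text
	fmt.Println(div)      // div
}
```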
+ +// +build ignore + +//go:generate go run gen.go +//go:generate go run gen.go -test + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "math/rand" + "os" + "sort" + "strings" +) + +// identifier converts s to a Go exported identifier. +// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". +func identifier(s string) string { + b := make([]byte, 0, len(s)) + cap := true + for _, c := range s { + if c == '-' { + cap = true + continue + } + if cap && 'a' <= c && c <= 'z' { + c -= 'a' - 'A' + } + cap = false + b = append(b, byte(c)) + } + return string(b) +} + +var test = flag.Bool("test", false, "generate table_test.go") + +func genFile(name string, buf *bytes.Buffer) { + b, err := format.Source(buf.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile(name, b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main() { + flag.Parse() + + var all []string + all = append(all, elements...) + all = append(all, attributes...) + all = append(all, eventHandlers...) + all = append(all, extra...) + sort.Strings(all) + + // uniq - lists have dups + w := 0 + for _, s := range all { + if w == 0 || all[w-1] != s { + all[w] = s + w++ + } + } + all = all[:w] + + if *test { + var buf bytes.Buffer + fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") + fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") + fmt.Fprintln(&buf, "package atom\n") + fmt.Fprintln(&buf, "var testAtomList = []string{") + for _, s := range all { + fmt.Fprintf(&buf, "\t%q,\n", s) + } + fmt.Fprintln(&buf, "}") + + genFile("table_test.go", &buf) + return + } + + // Find hash that minimizes table size. + var best *table + for i := 0; i < 1000000; i++ { + if best != nil && 1<<(best.k-1) < len(all) { + break + } + h := rand.Uint32() + for k := uint(0); k <= 16; k++ { + if best != nil && k >= best.k { + break + } + var t table + if t.init(h, k, all) { + best = &t + break + } + } + } + if best == nil { + fmt.Fprintf(os.Stderr, "failed to construct string table\n") + os.Exit(1) + } + + // Lay out strings, using overlaps when possible. + layout := append([]string{}, all...) + + // Remove strings that are substrings of other strings + for changed := true; changed; { + changed = false + for i, s := range layout { + if s == "" { + continue + } + for j, t := range layout { + if i != j && t != "" && strings.Contains(s, t) { + changed = true + layout[j] = "" + } + } + } + } + + // Join strings where one suffix matches another prefix. + for { + // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], + // maximizing overlap length k. + besti := -1 + bestj := -1 + bestk := 0 + for i, s := range layout { + if s == "" { + continue + } + for j, t := range layout { + if i == j { + continue + } + for k := bestk + 1; k <= len(s) && k <= len(t); k++ { + if s[len(s)-k:] == t[:k] { + besti = i + bestj = j + bestk = k + } + } + } + } + if bestk > 0 { + layout[besti] += layout[bestj][bestk:] + layout[bestj] = "" + continue + } + break + } + + text := strings.Join(layout, "") + + atom := map[string]uint32{} + for _, s := range all { + off := strings.Index(text, s) + if off < 0 { + panic("lost string " + s) + } + atom[s] = uint32(off<<8 | len(s)) + } + + var buf bytes.Buffer + // Generate the Go code. 
+	fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
+	fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
+	fmt.Fprintln(&buf, "package atom\n\nconst (")
+
+	// compute max len
+	maxLen := 0
+	for _, s := range all {
+		if maxLen < len(s) {
+			maxLen = len(s)
+		}
+		fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
+	}
+	fmt.Fprintln(&buf, ")\n")
+
+	fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
+	fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
+
+	fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
+	for i, s := range best.tab {
+		if s == "" {
+			continue
+		}
+		fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
+	}
+	fmt.Fprintf(&buf, "}\n")
+	datasize := (1 << best.k) * 4
+
+	fmt.Fprintln(&buf, "const atomText =")
+	textsize := len(text)
+	for len(text) > 60 {
+		fmt.Fprintf(&buf, "\t%q +\n", text[:60])
+		text = text[60:]
+	}
+	fmt.Fprintf(&buf, "\t%q\n\n", text)
+
+	genFile("table.go", &buf)
+
+	fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
+}
+
+type byLen []string
+
+func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
+func (x byLen) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+func (x byLen) Len() int           { return len(x) }
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s string) uint32 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint32(s[i])
+		h *= 16777619
+	}
+	return h
+}
+
+// A table represents an attempt at constructing the lookup table.
+// The lookup table uses cuckoo hashing, meaning that each string
+// can be found in one of two positions.
+type table struct {
+	h0   uint32
+	k    uint
+	mask uint32
+	tab  []string
+}
+
+// hash returns the two hashes for s.
+func (t *table) hash(s string) (h1, h2 uint32) {
+	h := fnv(t.h0, s)
+	h1 = h & t.mask
+	h2 = (h >> 16) & t.mask
+	return
+}
+
+// init initializes the table with the given parameters.
+// h0 is the initial hash value,
+// k is the number of bits of hash value to use, and
+// x is the list of strings to store in the table.
+// init returns false if the table cannot be constructed.
+func (t *table) init(h0 uint32, k uint, x []string) bool {
+	t.h0 = h0
+	t.k = k
+	t.tab = make([]string, 1<<k)
+	t.mask = 1<<k - 1
+	for _, s := range x {
+		if !t.insert(s) {
+			return false
+		}
+	}
+	return true
+}
+
+// insert inserts s in the table.
+func (t *table) insert(s string) bool {
+	h1, h2 := t.hash(s)
+	if t.tab[h1] == "" {
+		t.tab[h1] = s
+		return true
+	}
+	if t.tab[h2] == "" {
+		t.tab[h2] = s
+		return true
+	}
+	if t.push(h1, 0) {
+		t.tab[h1] = s
+		return true
+	}
+	if t.push(h2, 0) {
+		t.tab[h2] = s
+		return true
+	}
+	return false
+}
+
+// push attempts to push aside the entry in slot i.
+func (t *table) push(i uint32, depth int) bool {
+	if depth > len(t.tab) {
+		return false
+	}
+	s := t.tab[i]
+	h1, h2 := t.hash(s)
+	j := h1 + h2 - i
+	if t.tab[j] != "" && !t.push(j, depth+1) {
+		return false
+	}
+	t.tab[j] = s
+	return true
+}
+
+// The lists of element names and attribute keys were taken from
+// https://html.spec.whatwg.org/multipage/indices.html#index
+// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
+
+// "command", "keygen" and "menuitem" have been removed from the spec,
+// but are kept here for backwards compatibility.
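Before the name lists that feed this generator, a self-contained sketch of the cuckoo insertion implemented by insert/push above: every key has two candidate slots, and inserting into a full slot recursively evicts the occupant into its alternate slot, up to a depth limit. All names here (cuckoo, hashes) are made up for illustration:

```go
package main

import "fmt"

type cuckoo struct {
	tab  []string
	mask uint32
}

// hashes derives two candidate slots from one FNV hash, as gen.go does.
func hashes(s string, mask uint32) (uint32, uint32) {
	var h uint32 = 2166136261 // FNV offset basis
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619
	}
	return h & mask, (h >> 16) & mask
}

// push evicts the occupant of slot i into its alternate slot, recursively,
// giving up past a depth bound to avoid cycles.
func (c *cuckoo) push(i uint32, depth int) bool {
	if depth > len(c.tab) {
		return false
	}
	s := c.tab[i]
	h1, h2 := hashes(s, c.mask)
	j := h1 + h2 - i // the slot s does NOT currently occupy
	if c.tab[j] != "" && !c.push(j, depth+1) {
		return false
	}
	c.tab[j] = s
	return true
}

func (c *cuckoo) insert(s string) bool {
	h1, h2 := hashes(s, c.mask)
	for _, h := range []uint32{h1, h2} {
		if c.tab[h] == "" {
			c.tab[h] = s
			return true
		}
	}
	if c.push(h1, 0) {
		c.tab[h1] = s
		return true
	}
	if c.push(h2, 0) {
		c.tab[h2] = s
		return true
	}
	return false
}

func main() {
	c := &cuckoo{tab: make([]string, 8), mask: 7}
	for _, s := range []string{"div", "span", "p", "a", "img"} {
		fmt.Println(s, c.insert(s))
	}
}
```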
+var elements = []string{ + "a", + "abbr", + "address", + "area", + "article", + "aside", + "audio", + "b", + "base", + "bdi", + "bdo", + "blockquote", + "body", + "br", + "button", + "canvas", + "caption", + "cite", + "code", + "col", + "colgroup", + "command", + "data", + "datalist", + "dd", + "del", + "details", + "dfn", + "dialog", + "div", + "dl", + "dt", + "em", + "embed", + "fieldset", + "figcaption", + "figure", + "footer", + "form", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "hgroup", + "hr", + "html", + "i", + "iframe", + "img", + "input", + "ins", + "kbd", + "keygen", + "label", + "legend", + "li", + "link", + "main", + "map", + "mark", + "menu", + "menuitem", + "meta", + "meter", + "nav", + "noscript", + "object", + "ol", + "optgroup", + "option", + "output", + "p", + "param", + "picture", + "pre", + "progress", + "q", + "rp", + "rt", + "ruby", + "s", + "samp", + "script", + "section", + "select", + "slot", + "small", + "source", + "span", + "strong", + "style", + "sub", + "summary", + "sup", + "table", + "tbody", + "td", + "template", + "textarea", + "tfoot", + "th", + "thead", + "time", + "title", + "tr", + "track", + "u", + "ul", + "var", + "video", + "wbr", +} + +// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 +// +// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", +// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, +// but are kept here for backwards compatibility. +var attributes = []string{ + "abbr", + "accept", + "accept-charset", + "accesskey", + "action", + "allowfullscreen", + "allowpaymentrequest", + "allowusermedia", + "alt", + "as", + "async", + "autocomplete", + "autofocus", + "autoplay", + "challenge", + "charset", + "checked", + "cite", + "class", + "color", + "cols", + "colspan", + "command", + "content", + "contenteditable", + "contextmenu", + "controls", + "coords", + "crossorigin", + "data", + "datetime", + "default", + "defer", + "dir", + "dirname", + "disabled", + "download", + "draggable", + "dropzone", + "enctype", + "for", + "form", + "formaction", + "formenctype", + "formmethod", + "formnovalidate", + "formtarget", + "headers", + "height", + "hidden", + "high", + "href", + "hreflang", + "http-equiv", + "icon", + "id", + "inputmode", + "integrity", + "is", + "ismap", + "itemid", + "itemprop", + "itemref", + "itemscope", + "itemtype", + "keytype", + "kind", + "label", + "lang", + "list", + "loop", + "low", + "manifest", + "max", + "maxlength", + "media", + "mediagroup", + "method", + "min", + "minlength", + "multiple", + "muted", + "name", + "nomodule", + "nonce", + "novalidate", + "open", + "optimum", + "pattern", + "ping", + "placeholder", + "playsinline", + "poster", + "preload", + "radiogroup", + "readonly", + "referrerpolicy", + "rel", + "required", + "reversed", + "rows", + "rowspan", + "sandbox", + "spellcheck", + "scope", + "scoped", + "seamless", + "selected", + "shape", + "size", + "sizes", + "sortable", + "sorted", + "slot", + "span", + "spellcheck", + "src", + "srcdoc", + "srclang", + "srcset", + "start", + "step", + "style", + "tabindex", + "target", + "title", + "translate", + "type", + "typemustmatch", + "updateviacache", + "usemap", + "value", + "width", + "workertype", + "wrap", +} + +// "onautocomplete", "onautocompleteerror", "onmousewheel", +// "onshow" and "onsort" have been removed from the spec, +// but are kept here for backwards compatibility. 
+var eventHandlers = []string{ + "onabort", + "onautocomplete", + "onautocompleteerror", + "onauxclick", + "onafterprint", + "onbeforeprint", + "onbeforeunload", + "onblur", + "oncancel", + "oncanplay", + "oncanplaythrough", + "onchange", + "onclick", + "onclose", + "oncontextmenu", + "oncopy", + "oncuechange", + "oncut", + "ondblclick", + "ondrag", + "ondragend", + "ondragenter", + "ondragexit", + "ondragleave", + "ondragover", + "ondragstart", + "ondrop", + "ondurationchange", + "onemptied", + "onended", + "onerror", + "onfocus", + "onhashchange", + "oninput", + "oninvalid", + "onkeydown", + "onkeypress", + "onkeyup", + "onlanguagechange", + "onload", + "onloadeddata", + "onloadedmetadata", + "onloadend", + "onloadstart", + "onmessage", + "onmessageerror", + "onmousedown", + "onmouseenter", + "onmouseleave", + "onmousemove", + "onmouseout", + "onmouseover", + "onmouseup", + "onmousewheel", + "onwheel", + "onoffline", + "ononline", + "onpagehide", + "onpageshow", + "onpaste", + "onpause", + "onplay", + "onplaying", + "onpopstate", + "onprogress", + "onratechange", + "onreset", + "onresize", + "onrejectionhandled", + "onscroll", + "onsecuritypolicyviolation", + "onseeked", + "onseeking", + "onselect", + "onshow", + "onsort", + "onstalled", + "onstorage", + "onsubmit", + "onsuspend", + "ontimeupdate", + "ontoggle", + "onunhandledrejection", + "onunload", + "onvolumechange", + "onwaiting", +} + +// extra are ad-hoc values not covered by any of the lists above. +var extra = []string{ + "acronym", + "align", + "annotation", + "annotation-xml", + "applet", + "basefont", + "bgsound", + "big", + "blink", + "center", + "color", + "desc", + "face", + "font", + "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. + "foreignobject", + "frame", + "frameset", + "image", + "isindex", + "listing", + "malignmark", + "marquee", + "math", + "mglyph", + "mi", + "mn", + "mo", + "ms", + "mtext", + "nobr", + "noembed", + "noframes", + "plaintext", + "prompt", + "public", + "rb", + "rtc", + "spacer", + "strike", + "svg", + "system", + "tt", + "xmp", +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 000000000..2a938864c --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,783 @@ +// Code generated by go generate gen.go; DO NOT EDIT. 
+ +//go:generate go run gen.go + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Acronym Atom = 0xaa07 + Action Atom = 0x27206 + Address Atom = 0x6f307 + Align Atom = 0xb105 + Allowfullscreen Atom = 0x2080f + Allowpaymentrequest Atom = 0xc113 + Allowusermedia Atom = 0xdd0e + Alt Atom = 0xf303 + Annotation Atom = 0x1c90a + AnnotationXml Atom = 0x1c90e + Applet Atom = 0x31906 + Area Atom = 0x35604 + Article Atom = 0x3fc07 + As Atom = 0x3c02 + Aside Atom = 0x10705 + Async Atom = 0xff05 + Audio Atom = 0x11505 + Autocomplete Atom = 0x2780c + Autofocus Atom = 0x12109 + Autoplay Atom = 0x13c08 + B Atom = 0x101 + Base Atom = 0x3b04 + Basefont Atom = 0x3b08 + Bdi Atom = 0xba03 + Bdo Atom = 0x14b03 + Bgsound Atom = 0x15e07 + Big Atom = 0x17003 + Blink Atom = 0x17305 + Blockquote Atom = 0x1870a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x19106 + Canvas Atom = 0x10306 + Caption Atom = 0x23107 + Center Atom = 0x22006 + Challenge Atom = 0x29b09 + Charset Atom = 0x2107 + Checked Atom = 0x47907 + Cite Atom = 0x19c04 + Class Atom = 0x56405 + Code Atom = 0x5c504 + Col Atom = 0x1ab03 + Colgroup Atom = 0x1ab08 + Color Atom = 0x1bf05 + Cols Atom = 0x1c404 + Colspan Atom = 0x1c407 + Command Atom = 0x1d707 + Content Atom = 0x58b07 + Contenteditable Atom = 0x58b0f + Contextmenu Atom = 0x3800b + Controls Atom = 0x1de08 + Coords Atom = 0x1ea06 + Crossorigin Atom = 0x1fb0b + Data Atom = 0x4a504 + Datalist Atom = 0x4a508 + Datetime Atom = 0x2b808 + Dd Atom = 0x2d702 + Default Atom = 0x10a07 + Defer Atom = 0x5c705 + Del Atom = 0x45203 + Desc Atom = 0x56104 + Details Atom = 0x7207 + Dfn Atom = 0x8703 + Dialog Atom = 0xbb06 + Dir Atom = 0x9303 + Dirname Atom = 0x9307 + Disabled Atom = 0x16408 + Div Atom = 0x16b03 + Dl Atom = 0x5e602 + Download Atom = 0x46308 + Draggable Atom = 0x17a09 + Dropzone Atom = 0x40508 + Dt Atom = 0x64b02 + Em Atom = 0x6e02 + Embed Atom = 0x6e05 + Enctype Atom = 0x28d07 + Face Atom = 0x21e04 + Fieldset Atom = 0x22608 + Figcaption Atom = 0x22e0a + Figure Atom = 0x24806 + Font Atom = 0x3f04 + Footer Atom = 0xf606 + For Atom = 0x25403 + ForeignObject Atom = 0x2540d + Foreignobject Atom = 0x2610d + Form Atom = 0x26e04 + Formaction Atom = 0x26e0a + Formenctype Atom = 0x2890b + Formmethod Atom = 0x2a40a + Formnovalidate Atom = 0x2ae0e + Formtarget Atom = 0x2c00a + Frame Atom = 0x8b05 + Frameset Atom = 0x8b08 + H1 Atom = 0x15c02 + H2 Atom = 0x2de02 + H3 Atom = 0x30d02 + H4 Atom = 0x34502 + H5 Atom = 0x34f02 + H6 Atom = 0x64d02 + Head Atom = 0x33104 + Header Atom = 0x33106 + Headers Atom = 0x33107 + Height Atom = 0x5206 + Hgroup Atom = 0x2ca06 + Hidden Atom = 0x2d506 + High Atom = 0x2db04 + Hr Atom = 0x15702 + Href Atom = 0x2e004 + Hreflang Atom = 0x2e008 + Html Atom = 0x5604 + HttpEquiv Atom = 0x2e80a + I Atom = 0x601 + Icon Atom = 0x58a04 + Id Atom = 0x10902 + Iframe Atom = 0x2fc06 + Image Atom = 0x30205 + Img Atom = 0x30703 + Input Atom = 0x44b05 + Inputmode Atom = 0x44b09 + Ins Atom = 0x20403 + Integrity Atom = 0x23f09 + Is Atom = 0x16502 + Isindex Atom = 0x30f07 + Ismap Atom = 0x31605 + Itemid Atom = 0x38b06 + Itemprop Atom = 0x19d08 + Itemref Atom = 0x3cd07 + Itemscope Atom = 0x67109 + Itemtype Atom = 0x31f08 + Kbd Atom = 0xb903 + Keygen Atom = 0x3206 + Keytype Atom = 0xd607 + Kind Atom = 0x17704 + Label Atom = 0x5905 + Lang Atom = 0x2e404 + Legend Atom = 0x18106 + Li Atom = 0xb202 + Link Atom = 0x17404 + List Atom = 0x4a904 + Listing Atom = 0x4a907 + Loop Atom = 0x5d04 + Low Atom = 0xc303 + 
Main Atom = 0x1004 + Malignmark Atom = 0xb00a + Manifest Atom = 0x6d708 + Map Atom = 0x31803 + Mark Atom = 0xb604 + Marquee Atom = 0x32707 + Math Atom = 0x32e04 + Max Atom = 0x33d03 + Maxlength Atom = 0x33d09 + Media Atom = 0xe605 + Mediagroup Atom = 0xe60a + Menu Atom = 0x38704 + Menuitem Atom = 0x38708 + Meta Atom = 0x4b804 + Meter Atom = 0x9805 + Method Atom = 0x2a806 + Mglyph Atom = 0x30806 + Mi Atom = 0x34702 + Min Atom = 0x34703 + Minlength Atom = 0x34709 + Mn Atom = 0x2b102 + Mo Atom = 0xa402 + Ms Atom = 0x67402 + Mtext Atom = 0x35105 + Multiple Atom = 0x35f08 + Muted Atom = 0x36705 + Name Atom = 0x9604 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x6c07 + Noframes Atom = 0x8908 + Nomodule Atom = 0xa208 + Nonce Atom = 0x1a605 + Noscript Atom = 0x21608 + Novalidate Atom = 0x2b20a + Object Atom = 0x26806 + Ol Atom = 0x13702 + Onabort Atom = 0x19507 + Onafterprint Atom = 0x2360c + Onautocomplete Atom = 0x2760e + Onautocompleteerror Atom = 0x27613 + Onauxclick Atom = 0x61f0a + Onbeforeprint Atom = 0x69e0d + Onbeforeunload Atom = 0x6e70e + Onblur Atom = 0x56d06 + Oncancel Atom = 0x11908 + Oncanplay Atom = 0x14d09 + Oncanplaythrough Atom = 0x14d10 + Onchange Atom = 0x41b08 + Onclick Atom = 0x2f507 + Onclose Atom = 0x36c07 + Oncontextmenu Atom = 0x37e0d + Oncopy Atom = 0x39106 + Oncuechange Atom = 0x3970b + Oncut Atom = 0x3a205 + Ondblclick Atom = 0x3a70a + Ondrag Atom = 0x3b106 + Ondragend Atom = 0x3b109 + Ondragenter Atom = 0x3ba0b + Ondragexit Atom = 0x3c50a + Ondragleave Atom = 0x3df0b + Ondragover Atom = 0x3ea0a + Ondragstart Atom = 0x3f40b + Ondrop Atom = 0x40306 + Ondurationchange Atom = 0x41310 + Onemptied Atom = 0x40a09 + Onended Atom = 0x42307 + Onerror Atom = 0x42a07 + Onfocus Atom = 0x43107 + Onhashchange Atom = 0x43d0c + Oninput Atom = 0x44907 + Oninvalid Atom = 0x45509 + Onkeydown Atom = 0x45e09 + Onkeypress Atom = 0x46b0a + Onkeyup Atom = 0x48007 + Onlanguagechange Atom = 0x48d10 + Onload Atom = 0x49d06 + Onloadeddata Atom = 0x49d0c + Onloadedmetadata Atom = 0x4b010 + Onloadend Atom = 0x4c609 + Onloadstart Atom = 0x4cf0b + Onmessage Atom = 0x4da09 + Onmessageerror Atom = 0x4da0e + Onmousedown Atom = 0x4e80b + Onmouseenter Atom = 0x4f30c + Onmouseleave Atom = 0x4ff0c + Onmousemove Atom = 0x50b0b + Onmouseout Atom = 0x5160a + Onmouseover Atom = 0x5230b + Onmouseup Atom = 0x52e09 + Onmousewheel Atom = 0x53c0c + Onoffline Atom = 0x54809 + Ononline Atom = 0x55108 + Onpagehide Atom = 0x5590a + Onpageshow Atom = 0x5730a + Onpaste Atom = 0x57f07 + Onpause Atom = 0x59a07 + Onplay Atom = 0x5a406 + Onplaying Atom = 0x5a409 + Onpopstate Atom = 0x5ad0a + Onprogress Atom = 0x5b70a + Onratechange Atom = 0x5cc0c + Onrejectionhandled Atom = 0x5d812 + Onreset Atom = 0x5ea07 + Onresize Atom = 0x5f108 + Onscroll Atom = 0x60008 + Onsecuritypolicyviolation Atom = 0x60819 + Onseeked Atom = 0x62908 + Onseeking Atom = 0x63109 + Onselect Atom = 0x63a08 + Onshow Atom = 0x64406 + Onsort Atom = 0x64f06 + Onstalled Atom = 0x65909 + Onstorage Atom = 0x66209 + Onsubmit Atom = 0x66b08 + Onsuspend Atom = 0x67b09 + Ontimeupdate Atom = 0x400c + Ontoggle Atom = 0x68408 + Onunhandledrejection Atom = 0x68c14 + Onunload Atom = 0x6ab08 + Onvolumechange Atom = 0x6b30e + Onwaiting Atom = 0x6c109 + Onwheel Atom = 0x6ca07 + Open Atom = 0x1a304 + Optgroup Atom = 0x5f08 + Optimum Atom = 0x6d107 + Option Atom = 0x6e306 + Output Atom = 0x51d06 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x6607 + Picture Atom = 0x7b07 + Ping Atom = 0xef04 + Placeholder Atom = 0x1310b + Plaintext Atom = 0x1b209 + 
Playsinline Atom = 0x1400b + Poster Atom = 0x2cf06 + Pre Atom = 0x47003 + Preload Atom = 0x48607 + Progress Atom = 0x5b908 + Prompt Atom = 0x53606 + Public Atom = 0x58606 + Q Atom = 0xcf01 + Radiogroup Atom = 0x30a + Rb Atom = 0x3a02 + Readonly Atom = 0x35708 + Referrerpolicy Atom = 0x3d10e + Rel Atom = 0x48703 + Required Atom = 0x24c08 + Reversed Atom = 0x8008 + Rows Atom = 0x9c04 + Rowspan Atom = 0x9c07 + Rp Atom = 0x23c02 + Rt Atom = 0x19a02 + Rtc Atom = 0x19a03 + Ruby Atom = 0xfb04 + S Atom = 0x2501 + Samp Atom = 0x7804 + Sandbox Atom = 0x12907 + Scope Atom = 0x67505 + Scoped Atom = 0x67506 + Script Atom = 0x21806 + Seamless Atom = 0x37108 + Section Atom = 0x56807 + Select Atom = 0x63c06 + Selected Atom = 0x63c08 + Shape Atom = 0x1e505 + Size Atom = 0x5f504 + Sizes Atom = 0x5f505 + Slot Atom = 0x1ef04 + Small Atom = 0x20605 + Sortable Atom = 0x65108 + Sorted Atom = 0x33706 + Source Atom = 0x37806 + Spacer Atom = 0x43706 + Span Atom = 0x9f04 + Spellcheck Atom = 0x4740a + Src Atom = 0x5c003 + Srcdoc Atom = 0x5c006 + Srclang Atom = 0x5f907 + Srcset Atom = 0x6f906 + Start Atom = 0x3fa05 + Step Atom = 0x58304 + Strike Atom = 0xd206 + Strong Atom = 0x6dd06 + Style Atom = 0x6ff05 + Sub Atom = 0x66d03 + Summary Atom = 0x70407 + Sup Atom = 0x70b03 + Svg Atom = 0x70e03 + System Atom = 0x71106 + Tabindex Atom = 0x4be08 + Table Atom = 0x59505 + Target Atom = 0x2c406 + Tbody Atom = 0x2705 + Td Atom = 0x9202 + Template Atom = 0x71408 + Textarea Atom = 0x35208 + Tfoot Atom = 0xf505 + Th Atom = 0x15602 + Thead Atom = 0x33005 + Time Atom = 0x4204 + Title Atom = 0x11005 + Tr Atom = 0xcc02 + Track Atom = 0x1ba05 + Translate Atom = 0x1f209 + Tt Atom = 0x6802 + Type Atom = 0xd904 + Typemustmatch Atom = 0x2900d + U Atom = 0xb01 + Ul Atom = 0xa702 + Updateviacache Atom = 0x460e + Usemap Atom = 0x59e06 + Value Atom = 0x1505 + Var Atom = 0x16d03 + Video Atom = 0x2f105 + Wbr Atom = 0x57c03 + Width Atom = 0x64905 + Workertype Atom = 0x71c0a + Wrap Atom = 0x72604 + Xmp Atom = 0x12f03 +) + +const hash0 = 0x81cdf10e + +const maxAtomLen = 25 + +var table = [1 << 9]Atom{ + 0x1: 0xe60a, // mediagroup + 0x2: 0x2e404, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x8b08, // frameset + 0x7: 0x63a08, // onselect + 0x8: 0x71106, // system + 0xa: 0x64905, // width + 0xc: 0x2890b, // formenctype + 0xd: 0x13702, // ol + 0xe: 0x3970b, // oncuechange + 0x10: 0x14b03, // bdo + 0x11: 0x11505, // audio + 0x12: 0x17a09, // draggable + 0x14: 0x2f105, // video + 0x15: 0x2b102, // mn + 0x16: 0x38704, // menu + 0x17: 0x2cf06, // poster + 0x19: 0xf606, // footer + 0x1a: 0x2a806, // method + 0x1b: 0x2b808, // datetime + 0x1c: 0x19507, // onabort + 0x1d: 0x460e, // updateviacache + 0x1e: 0xff05, // async + 0x1f: 0x49d06, // onload + 0x21: 0x11908, // oncancel + 0x22: 0x62908, // onseeked + 0x23: 0x30205, // image + 0x24: 0x5d812, // onrejectionhandled + 0x26: 0x17404, // link + 0x27: 0x51d06, // output + 0x28: 0x33104, // head + 0x29: 0x4ff0c, // onmouseleave + 0x2a: 0x57f07, // onpaste + 0x2b: 0x5a409, // onplaying + 0x2c: 0x1c407, // colspan + 0x2f: 0x1bf05, // color + 0x30: 0x5f504, // size + 0x31: 0x2e80a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x5590a, // onpagehide + 0x35: 0x68c14, // onunhandledrejection + 0x37: 0x42a07, // onerror + 0x3a: 0x3b08, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x17704, // kind + 0x41: 0x35708, // readonly + 0x42: 0x30806, // mglyph + 0x44: 0xb202, // li + 0x46: 0x2d506, // hidden + 0x47: 0x70e03, // svg + 0x48: 0x58304, // step + 0x49: 0x23f09, // integrity + 0x4a: 0x58606, // public + 0x4c: 0x1ab03, 
// col + 0x4d: 0x1870a, // blockquote + 0x4e: 0x34f02, // h5 + 0x50: 0x5b908, // progress + 0x51: 0x5f505, // sizes + 0x52: 0x34502, // h4 + 0x56: 0x33005, // thead + 0x57: 0xd607, // keytype + 0x58: 0x5b70a, // onprogress + 0x59: 0x44b09, // inputmode + 0x5a: 0x3b109, // ondragend + 0x5d: 0x3a205, // oncut + 0x5e: 0x43706, // spacer + 0x5f: 0x1ab08, // colgroup + 0x62: 0x16502, // is + 0x65: 0x3c02, // as + 0x66: 0x54809, // onoffline + 0x67: 0x33706, // sorted + 0x69: 0x48d10, // onlanguagechange + 0x6c: 0x43d0c, // onhashchange + 0x6d: 0x9604, // name + 0x6e: 0xf505, // tfoot + 0x6f: 0x56104, // desc + 0x70: 0x33d03, // max + 0x72: 0x1ea06, // coords + 0x73: 0x30d02, // h3 + 0x74: 0x6e70e, // onbeforeunload + 0x75: 0x9c04, // rows + 0x76: 0x63c06, // select + 0x77: 0x9805, // meter + 0x78: 0x38b06, // itemid + 0x79: 0x53c0c, // onmousewheel + 0x7a: 0x5c006, // srcdoc + 0x7d: 0x1ba05, // track + 0x7f: 0x31f08, // itemtype + 0x82: 0xa402, // mo + 0x83: 0x41b08, // onchange + 0x84: 0x33107, // headers + 0x85: 0x5cc0c, // onratechange + 0x86: 0x60819, // onsecuritypolicyviolation + 0x88: 0x4a508, // datalist + 0x89: 0x4e80b, // onmousedown + 0x8a: 0x1ef04, // slot + 0x8b: 0x4b010, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x26806, // object + 0x91: 0x6b30e, // onvolumechange + 0x92: 0x2107, // charset + 0x93: 0x27613, // onautocompleteerror + 0x94: 0xc113, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0x10a07, // default + 0x97: 0x63c08, // selected + 0x98: 0x21e04, // face + 0x99: 0x1e505, // shape + 0x9b: 0x68408, // ontoggle + 0x9e: 0x64b02, // dt + 0x9f: 0xb604, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6ab08, // onunload + 0xa5: 0x5d04, // loop + 0xa6: 0x16408, // disabled + 0xaa: 0x42307, // onended + 0xab: 0xb00a, // malignmark + 0xad: 0x67b09, // onsuspend + 0xae: 0x35105, // mtext + 0xaf: 0x64f06, // onsort + 0xb0: 0x19d08, // itemprop + 0xb3: 0x67109, // itemscope + 0xb4: 0x17305, // blink + 0xb6: 0x3b106, // ondrag + 0xb7: 0xa702, // ul + 0xb8: 0x26e04, // form + 0xb9: 0x12907, // sandbox + 0xba: 0x8b05, // frame + 0xbb: 0x1505, // value + 0xbc: 0x66209, // onstorage + 0xbf: 0xaa07, // acronym + 0xc0: 0x19a02, // rt + 0xc2: 0x202, // br + 0xc3: 0x22608, // fieldset + 0xc4: 0x2900d, // typemustmatch + 0xc5: 0xa208, // nomodule + 0xc6: 0x6c07, // noembed + 0xc7: 0x69e0d, // onbeforeprint + 0xc8: 0x19106, // button + 0xc9: 0x2f507, // onclick + 0xca: 0x70407, // summary + 0xcd: 0xfb04, // ruby + 0xce: 0x56405, // class + 0xcf: 0x3f40b, // ondragstart + 0xd0: 0x23107, // caption + 0xd4: 0xdd0e, // allowusermedia + 0xd5: 0x4cf0b, // onloadstart + 0xd9: 0x16b03, // div + 0xda: 0x4a904, // list + 0xdb: 0x32e04, // math + 0xdc: 0x44b05, // input + 0xdf: 0x3ea0a, // ondragover + 0xe0: 0x2de02, // h2 + 0xe2: 0x1b209, // plaintext + 0xe4: 0x4f30c, // onmouseenter + 0xe7: 0x47907, // checked + 0xe8: 0x47003, // pre + 0xea: 0x35f08, // multiple + 0xeb: 0xba03, // bdi + 0xec: 0x33d09, // maxlength + 0xed: 0xcf01, // q + 0xee: 0x61f0a, // onauxclick + 0xf0: 0x57c03, // wbr + 0xf2: 0x3b04, // base + 0xf3: 0x6e306, // option + 0xf5: 0x41310, // ondurationchange + 0xf7: 0x8908, // noframes + 0xf9: 0x40508, // dropzone + 0xfb: 0x67505, // scope + 0xfc: 0x8008, // reversed + 0xfd: 0x3ba0b, // ondragenter + 0xfe: 0x3fa05, // start + 0xff: 0x12f03, // xmp + 0x100: 0x5f907, // srclang + 0x101: 0x30703, // img + 0x104: 0x101, // b + 0x105: 0x25403, // for + 0x106: 0x10705, // aside + 0x107: 0x44907, // oninput + 0x108: 0x35604, // area + 0x109: 0x2a40a, // formmethod + 0x10a: 0x72604, 
// wrap + 0x10c: 0x23c02, // rp + 0x10d: 0x46b0a, // onkeypress + 0x10e: 0x6802, // tt + 0x110: 0x34702, // mi + 0x111: 0x36705, // muted + 0x112: 0xf303, // alt + 0x113: 0x5c504, // code + 0x114: 0x6e02, // em + 0x115: 0x3c50a, // ondragexit + 0x117: 0x9f04, // span + 0x119: 0x6d708, // manifest + 0x11a: 0x38708, // menuitem + 0x11b: 0x58b07, // content + 0x11d: 0x6c109, // onwaiting + 0x11f: 0x4c609, // onloadend + 0x121: 0x37e0d, // oncontextmenu + 0x123: 0x56d06, // onblur + 0x124: 0x3fc07, // article + 0x125: 0x9303, // dir + 0x126: 0xef04, // ping + 0x127: 0x24c08, // required + 0x128: 0x45509, // oninvalid + 0x129: 0xb105, // align + 0x12b: 0x58a04, // icon + 0x12c: 0x64d02, // h6 + 0x12d: 0x1c404, // cols + 0x12e: 0x22e0a, // figcaption + 0x12f: 0x45e09, // onkeydown + 0x130: 0x66b08, // onsubmit + 0x131: 0x14d09, // oncanplay + 0x132: 0x70b03, // sup + 0x133: 0xc01, // p + 0x135: 0x40a09, // onemptied + 0x136: 0x39106, // oncopy + 0x137: 0x19c04, // cite + 0x138: 0x3a70a, // ondblclick + 0x13a: 0x50b0b, // onmousemove + 0x13c: 0x66d03, // sub + 0x13d: 0x48703, // rel + 0x13e: 0x5f08, // optgroup + 0x142: 0x9c07, // rowspan + 0x143: 0x37806, // source + 0x144: 0x21608, // noscript + 0x145: 0x1a304, // open + 0x146: 0x20403, // ins + 0x147: 0x2540d, // foreignObject + 0x148: 0x5ad0a, // onpopstate + 0x14a: 0x28d07, // enctype + 0x14b: 0x2760e, // onautocomplete + 0x14c: 0x35208, // textarea + 0x14e: 0x2780c, // autocomplete + 0x14f: 0x15702, // hr + 0x150: 0x1de08, // controls + 0x151: 0x10902, // id + 0x153: 0x2360c, // onafterprint + 0x155: 0x2610d, // foreignobject + 0x156: 0x32707, // marquee + 0x157: 0x59a07, // onpause + 0x158: 0x5e602, // dl + 0x159: 0x5206, // height + 0x15a: 0x34703, // min + 0x15b: 0x9307, // dirname + 0x15c: 0x1f209, // translate + 0x15d: 0x5604, // html + 0x15e: 0x34709, // minlength + 0x15f: 0x48607, // preload + 0x160: 0x71408, // template + 0x161: 0x3df0b, // ondragleave + 0x162: 0x3a02, // rb + 0x164: 0x5c003, // src + 0x165: 0x6dd06, // strong + 0x167: 0x7804, // samp + 0x168: 0x6f307, // address + 0x169: 0x55108, // ononline + 0x16b: 0x1310b, // placeholder + 0x16c: 0x2c406, // target + 0x16d: 0x20605, // small + 0x16e: 0x6ca07, // onwheel + 0x16f: 0x1c90a, // annotation + 0x170: 0x4740a, // spellcheck + 0x171: 0x7207, // details + 0x172: 0x10306, // canvas + 0x173: 0x12109, // autofocus + 0x174: 0xc05, // param + 0x176: 0x46308, // download + 0x177: 0x45203, // del + 0x178: 0x36c07, // onclose + 0x179: 0xb903, // kbd + 0x17a: 0x31906, // applet + 0x17b: 0x2e004, // href + 0x17c: 0x5f108, // onresize + 0x17e: 0x49d0c, // onloadeddata + 0x180: 0xcc02, // tr + 0x181: 0x2c00a, // formtarget + 0x182: 0x11005, // title + 0x183: 0x6ff05, // style + 0x184: 0xd206, // strike + 0x185: 0x59e06, // usemap + 0x186: 0x2fc06, // iframe + 0x187: 0x1004, // main + 0x189: 0x7b07, // picture + 0x18c: 0x31605, // ismap + 0x18e: 0x4a504, // data + 0x18f: 0x5905, // label + 0x191: 0x3d10e, // referrerpolicy + 0x192: 0x15602, // th + 0x194: 0x53606, // prompt + 0x195: 0x56807, // section + 0x197: 0x6d107, // optimum + 0x198: 0x2db04, // high + 0x199: 0x15c02, // h1 + 0x19a: 0x65909, // onstalled + 0x19b: 0x16d03, // var + 0x19c: 0x4204, // time + 0x19e: 0x67402, // ms + 0x19f: 0x33106, // header + 0x1a0: 0x4da09, // onmessage + 0x1a1: 0x1a605, // nonce + 0x1a2: 0x26e0a, // formaction + 0x1a3: 0x22006, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x59505, // table + 0x1a6: 0x4a907, // listing + 0x1a7: 0x18106, // legend + 0x1a9: 0x29b09, // challenge + 0x1aa: 0x24806, // 
figure + 0x1ab: 0xe605, // media + 0x1ae: 0xd904, // type + 0x1af: 0x3f04, // font + 0x1b0: 0x4da0e, // onmessageerror + 0x1b1: 0x37108, // seamless + 0x1b2: 0x8703, // dfn + 0x1b3: 0x5c705, // defer + 0x1b4: 0xc303, // low + 0x1b5: 0x19a03, // rtc + 0x1b6: 0x5230b, // onmouseover + 0x1b7: 0x2b20a, // novalidate + 0x1b8: 0x71c0a, // workertype + 0x1ba: 0x3cd07, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x31803, // map + 0x1bf: 0x400c, // ontimeupdate + 0x1c0: 0x15e07, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64406, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x6607, // pattern + 0x1cc: 0x14d10, // oncanplaythrough + 0x1ce: 0x2d702, // dd + 0x1cf: 0x6f906, // srcset + 0x1d0: 0x17003, // big + 0x1d2: 0x65108, // sortable + 0x1d3: 0x48007, // onkeyup + 0x1d5: 0x5a406, // onplay + 0x1d7: 0x4b804, // meta + 0x1d8: 0x40306, // ondrop + 0x1da: 0x60008, // onscroll + 0x1db: 0x1fb0b, // crossorigin + 0x1dc: 0x5730a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x9202, // td + 0x1df: 0x58b0f, // contenteditable + 0x1e0: 0x27206, // action + 0x1e1: 0x1400b, // playsinline + 0x1e2: 0x43107, // onfocus + 0x1e3: 0x2e008, // hreflang + 0x1e5: 0x5160a, // onmouseout + 0x1e6: 0x5ea07, // onreset + 0x1e7: 0x13c08, // autoplay + 0x1e8: 0x63109, // onseeking + 0x1ea: 0x67506, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3800b, // contextmenu + 0x1ef: 0x52e09, // onmouseup + 0x1f1: 0x2ca06, // hgroup + 0x1f2: 0x2080f, // allowfullscreen + 0x1f3: 0x4be08, // tabindex + 0x1f6: 0x30f07, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2ae0e, // formnovalidate + 0x1fb: 0x1c90e, // annotation-xml + 0x1fc: 0x6e05, // embed + 0x1fd: 0x21806, // script + 0x1fe: 0xbb06, // dialog + 0x1ff: 0x1d707, // command +} + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + + "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + + "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + + "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + + "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + + "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + + "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + + "ignObjectforeignobjectformactionautocompleteerrorformenctype" + + "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + + "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + + "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + + "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + + "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + + "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + + "articleondropzonemptiedondurationchangeonendedonerroronfocus" + + "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + + "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + + "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + + "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + + "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + + "classectionbluronpageshowbronpastepublicontenteditableonpaus" + + "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + + 
"violationauxclickonseekedonseekingonselectedonshowidth6onsor" + + "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + + "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + + "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + + "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go new file mode 100644 index 000000000..13bed1599 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset.go @@ -0,0 +1,257 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package charset provides common text encodings for HTML documents. +// +// The mapping from encoding labels to encodings is defined at +// https://encoding.spec.whatwg.org/. +package charset // import "golang.org/x/net/html/charset" + +import ( + "bytes" + "fmt" + "io" + "mime" + "strings" + "unicode/utf8" + + "golang.org/x/net/html" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/htmlindex" + "golang.org/x/text/transform" +) + +// Lookup returns the encoding with the specified label, and its canonical +// name. It returns nil and the empty string if label is not one of the +// standard encodings for HTML. Matching is case-insensitive and ignores +// leading and trailing whitespace. Encoders will use HTML escape sequences for +// runes that are not supported by the character set. +func Lookup(label string) (e encoding.Encoding, name string) { + e, err := htmlindex.Get(label) + if err != nil { + return nil, "" + } + name, _ = htmlindex.Name(e) + return &htmlEncoding{e}, name +} + +type htmlEncoding struct{ encoding.Encoding } + +func (h *htmlEncoding) NewEncoder() *encoding.Encoder { + // HTML requires a non-terminating legacy encoder. We use HTML escapes to + // substitute unsupported code points. + return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder()) +} + +// DetermineEncoding determines the encoding of an HTML document by examining +// up to the first 1024 bytes of content and the declared Content-Type. +// +// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding +func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) { + if len(content) > 1024 { + content = content[:1024] + } + + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + e, name = Lookup(b.enc) + return e, name, true + } + } + + if _, params, err := mime.ParseMediaType(contentType); err == nil { + if cs, ok := params["charset"]; ok { + if e, name = Lookup(cs); e != nil { + return e, name, true + } + } + } + + if len(content) > 0 { + e, name = prescan(content) + if e != nil { + return e, name, false + } + } + + // Try to detect UTF-8. + // First eliminate any partial rune at the end. + for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { + b := content[i] + if b < 0x80 { + break + } + if utf8.RuneStart(b) { + content = content[:i] + break + } + } + hasHighBit := false + for _, c := range content { + if c >= 0x80 { + hasHighBit = true + break + } + } + if hasHighBit && utf8.Valid(content) { + return encoding.Nop, "utf-8", false + } + + // TODO: change default depending on user's locale? + return charmap.Windows1252, "windows-1252", false +} + +// NewReader returns an io.Reader that converts the content of r to UTF-8. 
+// It calls DetermineEncoding to find out what r's encoding is. +func NewReader(r io.Reader, contentType string) (io.Reader, error) { + preview := make([]byte, 1024) + n, err := io.ReadFull(r, preview) + switch { + case err == io.ErrUnexpectedEOF: + preview = preview[:n] + r = bytes.NewReader(preview) + case err != nil: + return nil, err + default: + r = io.MultiReader(bytes.NewReader(preview), r) + } + + if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop { + r = transform.NewReader(r, e.NewDecoder()) + } + return r, nil +} + +// NewReaderLabel returns a reader that converts from the specified charset to +// UTF-8. It uses Lookup to find the encoding that corresponds to label, and +// returns an error if Lookup returns nil. It is suitable for use as +// encoding/xml.Decoder's CharsetReader function. +func NewReaderLabel(label string, input io.Reader) (io.Reader, error) { + e, _ := Lookup(label) + if e == nil { + return nil, fmt.Errorf("unsupported charset: %q", label) + } + return transform.NewReader(input, e.NewDecoder()), nil +} + +func prescan(content []byte) (e encoding.Encoding, name string) { + z := html.NewTokenizer(bytes.NewReader(content)) + for { + switch z.Next() { + case html.ErrorToken: + return nil, "" + + case html.StartTagToken, html.SelfClosingTagToken: + tagName, hasAttr := z.TagName() + if !bytes.Equal(tagName, []byte("meta")) { + continue + } + attrList := make(map[string]bool) + gotPragma := false + + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + needPragma := dontKnow + + name = "" + e = nil + for hasAttr { + var key, val []byte + key, val, hasAttr = z.TagAttr() + ks := string(key) + if attrList[ks] { + continue + } + attrList[ks] = true + for i, c := range val { + if 'A' <= c && c <= 'Z' { + val[i] = c + 0x20 + } + } + + switch ks { + case "http-equiv": + if bytes.Equal(val, []byte("content-type")) { + gotPragma = true + } + + case "content": + if e == nil { + name = fromMetaElement(string(val)) + if name != "" { + e, name = Lookup(name) + if e != nil { + needPragma = doNeedPragma + } + } + } + + case "charset": + e, name = Lookup(string(val)) + needPragma = doNotNeedPragma + } + } + + if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { + continue + } + + if strings.HasPrefix(name, "utf-16") { + name = "utf-8" + e = encoding.Nop + } + + if e != nil { + return e, name + } + } + } +} + +func fromMetaElement(s string) string { + for s != "" { + csLoc := strings.Index(s, "charset") + if csLoc == -1 { + return "" + } + s = s[csLoc+len("charset"):] + s = strings.TrimLeft(s, " \t\n\f\r") + if !strings.HasPrefix(s, "=") { + continue + } + s = s[1:] + s = strings.TrimLeft(s, " \t\n\f\r") + if s == "" { + return "" + } + if q := s[0]; q == '"' || q == '\'' { + s = s[1:] + closeQuote := strings.IndexRune(s, rune(q)) + if closeQuote == -1 { + return "" + } + return s[:closeQuote] + } + + end := strings.IndexAny(s, "; \t\n\f\r") + if end == -1 { + end = len(s) + } + return s[:end] + } + return "" +} + +var boms = []struct { + bom []byte + enc string +}{ + {[]byte{0xfe, 0xff}, "utf-16be"}, + {[]byte{0xff, 0xfe}, "utf-16le"}, + {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, +} diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 000000000..a3a918f0b --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,112 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility. + "keygen": true, + "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "math": + switch element.Data { + case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": + return true + } + case "svg": + switch element.Data { + case "foreignObject", "desc", "title": + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 000000000..822ed42a0 --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. 
In EBNF notation, the valid call sequence per token is:
+
+	Next {Raw} [ Token | Text | TagName {TagAttr} ]
+
+Token returns an independent data structure that completely describes a token.
+Entities (such as "&lt;") are unescaped, tag names and attribute keys are
+lower-cased, and attributes are collected into a []Attribute. For example:
+
+	for {
+		if z.Next() == html.ErrorToken {
+			// Returning io.EOF indicates success.
+			return z.Err()
+		}
+		emitToken(z.Token())
+	}
+
+The low-level API performs fewer allocations and copies, but the contents of
+the []byte values returned by Text, TagName and TagAttr may change on the next
+call to Next. For example, to extract an HTML page's anchor text:
+
+	depth := 0
+	for {
+		tt := z.Next()
+		switch tt {
+		case html.ErrorToken:
+			return z.Err()
+		case html.TextToken:
+			if depth > 0 {
+				// emitBytes should copy the []byte it receives,
+				// if it doesn't process it immediately.
+				emitBytes(z.Text())
+			}
+		case html.StartTagToken, html.EndTagToken:
+			tn, _ := z.TagName()
+			if len(tn) == 1 && tn[0] == 'a' {
+				if tt == html.StartTagToken {
+					depth++
+				} else {
+					depth--
+				}
+			}
+		}
+	}
+
+Parsing is done by calling Parse with an io.Reader, which returns the root of
+the parse tree (the document element) as a *Node. It is the caller's
+responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
+example, to process each anchor node in depth-first order:
+
+	doc, err := html.Parse(r)
+	if err != nil {
+		// ...
+	}
+	var f func(*html.Node)
+	f = func(n *html.Node) {
+		if n.Type == html.ElementNode && n.Data == "a" {
+			// Do something with n...
+		}
+		for c := n.FirstChild; c != nil; c = c.NextSibling {
+			f(c)
+		}
+	}
+	f(doc)
+
+The relevant specifications include:
+https://html.spec.whatwg.org/multipage/syntax.html and
+https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+*/
+package html // import "golang.org/x/net/html"
+
+// The tokenization algorithm implemented by this package is not a line-by-line
+// transliteration of the relatively verbose state-machine in the WHATWG
+// specification. A more direct approach is used instead, where the program
+// counter implies the state, such as whether it is tokenizing a tag or a text
+// node. Specification compliance is verified by checking expected and actual
+// outputs over a test suite rather than aiming for algorithmic fidelity.

+// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
+// TODO(nigeltao): How does parsing interact with a JavaScript engine?
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go
new file mode 100644
index 000000000..c484e5a94
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doctype.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"strings"
+)
+
+// parseDoctype parses the data from a DoctypeToken into a name,
+// public identifier, and system identifier. It returns a Node whose Type
+// is DoctypeNode, whose Data is the name, and which has attributes
+// named "system" and "public" for the two identifiers if they were present.
+// quirks is whether the document should be parsed in "quirks mode".
+func parseDoctype(s string) (n *Node, quirks bool) {
+	n = &Node{Type: DoctypeNode}
+
+	// Find the name.
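+	// (Editorial sketch, not part of the upstream x/net/html source: s is the
+	// data of a DoctypeToken, roughly the text between "<!DOCTYPE" and the
+	// closing ">". For example,
+	//	parseDoctype(`html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"`)
+	// returns a DoctypeNode with Data "html", attributes
+	// {public, "-//W3C//DTD HTML 4.01//EN"} and
+	// {system, "http://www.w3.org/TR/html4/strict.dtd"}, and quirks == false,
+	// since neither identifier matches a quirky pattern below.)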
+ space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. + if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. +var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 000000000..b628880a0 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. +// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + 
"Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + "DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', 
+ "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', + "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": 
'\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + "Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": 
'\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": '\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": 
'\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + "angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', 
+ "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": '\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": 
'\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + "diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + 
"emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": '\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + 
"harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + "isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + 
"ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": '\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + 
"mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + "nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": 
'\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + "piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', 
+ "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": '\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": 
'\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": '\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": 
'\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + "upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + 
"xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + "REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', 
+ "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. + // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": 
{'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 000000000..d85613962 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,258 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "<" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". + b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. + b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. 
+	} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
+		// No-op.
+	} else if x := entity[entityName]; x != 0 {
+		return dst + utf8.EncodeRune(b[dst:], x), src + i
+	} else if x := entity2[entityName]; x[0] != 0 {
+		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
+		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
+	} else if !attribute {
+		maxLen := len(entityName) - 1
+		if maxLen > longestEntityWithoutSemicolon {
+			maxLen = longestEntityWithoutSemicolon
+		}
+		for j := maxLen; j > 1; j-- {
+			if x := entity[entityName[:j]]; x != 0 {
+				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
+			}
+		}
+	}
+
+	dst1, src1 = dst+i, src+i
+	copy(b[dst:dst1], b[src:src1])
+	return dst1, src1
+}
+
+// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
+// attribute should be true if parsing an attribute value.
+func unescape(b []byte, attribute bool) []byte {
+	for i, c := range b {
+		if c == '&' {
+			dst, src := unescapeEntity(b, i, i, attribute)
+			for src < len(b) {
+				c := b[src]
+				if c == '&' {
+					dst, src = unescapeEntity(b, dst, src, attribute)
+				} else {
+					b[dst] = c
+					dst, src = dst+1, src+1
+				}
+			}
+			return b[0:dst]
+		}
+	}
+	return b
+}
+
+// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
+func lower(b []byte) []byte {
+	for i, c := range b {
+		if 'A' <= c && c <= 'Z' {
+			b[i] = c + 'a' - 'A'
+		}
+	}
+	return b
+}
+
+const escapedChars = "&'<>\"\r"
+
+func escape(w writer, s string) error {
+	i := strings.IndexAny(s, escapedChars)
+	for i != -1 {
+		if _, err := w.WriteString(s[:i]); err != nil {
+			return err
+		}
+		var esc string
+		switch s[i] {
+		case '&':
+			esc = "&amp;"
+		case '\'':
+			// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
+			esc = "&#39;"
+		case '<':
+			esc = "&lt;"
+		case '>':
+			esc = "&gt;"
+		case '"':
+			// "&#34;" is shorter than "&quot;".
+			esc = "&#34;"
+		case '\r':
+			esc = "&#13;"
+		default:
+			panic("unrecognized escape character")
+		}
+		s = s[i+1:]
+		if _, err := w.WriteString(esc); err != nil {
+			return err
+		}
+		i = strings.IndexAny(s, escapedChars)
+	}
+	_, err := w.WriteString(s)
+	return err
+}
+
+// EscapeString escapes special characters like "<" to become "&lt;". It
+// escapes only five such characters: <, >, &, ' and ".
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func EscapeString(s string) string {
+	if strings.IndexAny(s, escapedChars) == -1 {
+		return s
+	}
+	var buf bytes.Buffer
+	escape(&buf, s)
+	return buf.String()
+}
+
+// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
+// larger range of entities than EscapeString escapes. For example, "&aacute;"
+// unescapes to "á", as does "&#225;" and "&#xE1;".
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func UnescapeString(s string) string {
+	for _, c := range s {
+		if c == '&' {
+			return string(unescape([]byte(s), false))
+		}
+	}
+	return s
+}
diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go
new file mode 100644
index 000000000..01477a963
--- /dev/null
+++ b/vendor/golang.org/x/net/html/foreign.go
@@ -0,0 +1,226 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package html + +import ( + "strings" +) + +func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { + for i := range aa { + if newName, ok := nameMap[aa[i].Key]; ok { + aa[i].Key = newName + } + } +} + +func adjustForeignAttributes(aa []Attribute) { + for i, a := range aa { + if a.Key == "" || a.Key[0] != 'x' { + continue + } + switch a.Key { + case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", + "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": + j := strings.Index(a.Key, ":") + aa[i].Namespace = a.Key[:j] + aa[i].Key = a.Key[j+1:] + } + } +} + +func htmlIntegrationPoint(n *Node) bool { + if n.Type != ElementNode { + return false + } + switch n.Namespace { + case "math": + if n.Data == "annotation-xml" { + for _, a := range n.Attr { + if a.Key == "encoding" { + val := strings.ToLower(a.Val) + if val == "text/html" || val == "application/xhtml+xml" { + return true + } + } + } + } + case "svg": + switch n.Data { + case "desc", "foreignObject", "title": + return true + } + } + return false +} + +func mathMLTextIntegrationPoint(n *Node) bool { + if n.Namespace != "math" { + return false + } + switch n.Data { + case "mi", "mo", "mn", "ms", "mtext": + return true + } + return false +} + +// Section 12.2.6.5. +var breakout = map[string]bool{ + "b": true, + "big": true, + "blockquote": true, + "body": true, + "br": true, + "center": true, + "code": true, + "dd": true, + "div": true, + "dl": true, + "dt": true, + "em": true, + "embed": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "hr": true, + "i": true, + "img": true, + "li": true, + "listing": true, + "menu": true, + "meta": true, + "nobr": true, + "ol": true, + "p": true, + "pre": true, + "ruby": true, + "s": true, + "small": true, + "span": true, + "strong": true, + "strike": true, + "sub": true, + "sup": true, + "table": true, + "tt": true, + "u": true, + "ul": true, + "var": true, +} + +// Section 12.2.6.5. 
+var svgTagNameAdjustments = map[string]string{ + "altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath", +} + +// Section 12.2.6.1 +var mathMLAttributeAdjustments = map[string]string{ + "definitionurl": "definitionURL", +} + +var svgAttributeAdjustments = map[string]string{ + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", +} diff --git 
a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
new file mode 100644
index 000000000..633ee15dc
--- /dev/null
+++ b/vendor/golang.org/x/net/html/node.go
@@ -0,0 +1,220 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"golang.org/x/net/html/atom"
+)
+
+// A NodeType is the type of a Node.
+type NodeType uint32
+
+const (
+	ErrorNode NodeType = iota
+	TextNode
+	DocumentNode
+	ElementNode
+	CommentNode
+	DoctypeNode
+	scopeMarkerNode
+)
+
+// Section 12.2.4.3 says "The markers are inserted when entering applet,
+// object, marquee, template, td, th, and caption elements, and are used
+// to prevent formatting from "leaking" into applet, object, marquee,
+// template, td, th, and caption elements".
+var scopeMarker = Node{Type: scopeMarkerNode}
+
+// A Node consists of a NodeType and some Data (tag name for element nodes,
+// content for text) and are part of a tree of Nodes. Element nodes may also
+// have a Namespace and contain a slice of Attributes. Data is unescaped, so
+// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
+// is the atom for Data, or zero if Data is not a known tag name.
+//
+// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
+// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
+// "svg" is short for "http://www.w3.org/2000/svg".
+type Node struct {
+	Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
+
+	Type      NodeType
+	DataAtom  atom.Atom
+	Data      string
+	Namespace string
+	Attr      []Attribute
+}
+
+// InsertBefore inserts newChild as a child of n, immediately before oldChild
+// in the sequence of n's children. oldChild may be nil, in which case newChild
+// is appended to the end of n's children.
+//
+// It will panic if newChild already has a parent or siblings.
+func (n *Node) InsertBefore(newChild, oldChild *Node) {
+	if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
+		panic("html: InsertBefore called for an attached child Node")
+	}
+	var prev, next *Node
+	if oldChild != nil {
+		prev, next = oldChild.PrevSibling, oldChild
+	} else {
+		prev = n.LastChild
+	}
+	if prev != nil {
+		prev.NextSibling = newChild
+	} else {
+		n.FirstChild = newChild
+	}
+	if next != nil {
+		next.PrevSibling = newChild
+	} else {
+		n.LastChild = newChild
+	}
+	newChild.Parent = n
+	newChild.PrevSibling = prev
+	newChild.NextSibling = next
+}
+
+// AppendChild adds a node c as a child of n.
+//
+// It will panic if c already has a parent or siblings.
+func (n *Node) AppendChild(c *Node) {
+	if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
+		panic("html: AppendChild called for an attached child Node")
+	}
+	last := n.LastChild
+	if last != nil {
+		last.NextSibling = c
+	} else {
+		n.FirstChild = c
+	}
+	n.LastChild = c
+	c.Parent = n
+	c.PrevSibling = last
+}
+
+// RemoveChild removes a node c that is a child of n. Afterwards, c will have
+// no parent and no siblings.
+//
+// It will panic if c's parent is not n.
+func (n *Node) RemoveChild(c *Node) {
+	if c.Parent != n {
+		panic("html: RemoveChild called for a non-child Node")
+	}
+	if n.FirstChild == c {
+		n.FirstChild = c.NextSibling
+	}
+	if c.NextSibling != nil {
+		c.NextSibling.PrevSibling = c.PrevSibling
+	}
+	if n.LastChild == c {
+		n.LastChild = c.PrevSibling
+	}
+	if c.PrevSibling != nil {
+		c.PrevSibling.NextSibling = c.NextSibling
+	}
+	c.Parent = nil
+	c.PrevSibling = nil
+	c.NextSibling = nil
+}
+
+// reparentChildren reparents all of src's child nodes to dst.
+func reparentChildren(dst, src *Node) {
+	for {
+		child := src.FirstChild
+		if child == nil {
+			break
+		}
+		src.RemoveChild(child)
+		dst.AppendChild(child)
+	}
+}
+
+// clone returns a new node with the same type, data and attributes.
+// The clone has no parent, no siblings and no children.
+func (n *Node) clone() *Node {
+	m := &Node{
+		Type:     n.Type,
+		DataAtom: n.DataAtom,
+		Data:     n.Data,
+		Attr:     make([]Attribute, len(n.Attr)),
+	}
+	copy(m.Attr, n.Attr)
+	return m
+}
+
+// nodeStack is a stack of nodes.
+type nodeStack []*Node
+
+// pop pops the stack. It will panic if s is empty.
+func (s *nodeStack) pop() *Node {
+	i := len(*s)
+	n := (*s)[i-1]
+	*s = (*s)[:i-1]
+	return n
+}
+
+// top returns the most recently pushed node, or nil if s is empty.
+func (s *nodeStack) top() *Node {
+	if i := len(*s); i > 0 {
+		return (*s)[i-1]
+	}
+	return nil
+}
+
+// index returns the index of the top-most occurrence of n in the stack, or -1
+// if n is not present.
+func (s *nodeStack) index(n *Node) int {
+	for i := len(*s) - 1; i >= 0; i-- {
+		if (*s)[i] == n {
+			return i
+		}
+	}
+	return -1
+}
+
+// contains returns whether a is within s.
+func (s *nodeStack) contains(a atom.Atom) bool {
+	for _, n := range *s {
+		if n.DataAtom == a && n.Namespace == "" {
+			return true
+		}
+	}
+	return false
+}
+
+// insert inserts a node at the given index.
+func (s *nodeStack) insert(i int, n *Node) {
+	(*s) = append(*s, nil)
+	copy((*s)[i+1:], (*s)[i:])
+	(*s)[i] = n
+}
+
+// remove removes a node from the stack. It is a no-op if n is not present.
+func (s *nodeStack) remove(n *Node) {
+	i := s.index(n)
+	if i == -1 {
+		return
+	}
+	copy((*s)[i:], (*s)[i+1:])
+	j := len(*s) - 1
+	(*s)[j] = nil
+	*s = (*s)[:j]
+}
+
+type insertionModeStack []insertionMode
+
+func (s *insertionModeStack) pop() (im insertionMode) {
+	i := len(*s)
+	im = (*s)[i-1]
+	*s = (*s)[:i-1]
+	return im
+}
+
+func (s *insertionModeStack) top() insertionMode {
+	if i := len(*s); i > 0 {
+		return (*s)[i-1]
+	}
+	return nil
+}
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
new file mode 100644
index 000000000..992cff2a3
--- /dev/null
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -0,0 +1,2417 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	a "golang.org/x/net/html/atom"
+)
+
+// A parser implements the HTML5 parsing algorithm:
+// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
+type parser struct {
+	// tokenizer provides the tokens for the parser.
+	tokenizer *Tokenizer
+	// tok is the most recently read token.
+	tok Token
+	// Self-closing tags like <br/>
are treated as start tags, except that + // hasSelfClosingToken is set while they are being processed. + hasSelfClosingToken bool + // doc is the document root element. + doc *Node + // The stack of open elements (section 12.2.4.2) and active formatting + // elements (section 12.2.4.3). + oe, afe nodeStack + // Element pointers (section 12.2.4.4). + head, form *Node + // Other parsing state flags (section 12.2.4.5). + scripting, framesetOK bool + // The stack of template insertion modes + templateStack insertionModeStack + // im is the current insertion mode. + im insertionMode + // originalIM is the insertion mode to go back to after completing a text + // or inTableText insertion mode. + originalIM insertionMode + // fosterParenting is whether new elements should be inserted according to + // the foster parenting rules (section 12.2.6.1). + fosterParenting bool + // quirks is whether the parser is operating in "quirks mode." + quirks bool + // fragment is whether the parser is parsing an HTML fragment. + fragment bool + // context is the context element when parsing an HTML fragment + // (section 12.4). + context *Node +} + +func (p *parser) top() *Node { + if n := p.oe.top(); n != nil { + return n + } + return p.doc +} + +// Stop tags for use in popUntil. These come from section 12.2.4.2. +var ( + defaultScopeStopTags = map[string][]a.Atom{ + "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, + "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, + "svg": {a.Desc, a.ForeignObject, a.Title}, + } +) + +type scope int + +const ( + defaultScope scope = iota + listItemScope + buttonScope + tableScope + tableRowScope + tableBodyScope + selectScope +) + +// popUntil pops the stack of open elements at the highest element whose tag +// is in matchTags, provided there is no higher element in the scope's stop +// tags (as defined in section 12.2.4.2). It returns whether or not there was +// such an element. If there was not, popUntil leaves the stack unchanged. +// +// For example, the set of stop tags for table scope is: "html", "table". If +// the stack was: +// ["html", "body", "font", "table", "b", "i", "u"] +// then popUntil(tableScope, "font") would return false, but +// popUntil(tableScope, "i") would return true and the stack would become: +// ["html", "body", "font", "table", "b"] +// +// If an element's tag is in both the stop tags and matchTags, then the stack +// will be popped and the function returns true (provided, of course, there was +// no higher element in the stack that was also in the stop tags). For example, +// popUntil(tableScope, "table") returns true and leaves: +// ["html", "body", "font"] +func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { + if i := p.indexOfElementInScope(s, matchTags...); i != -1 { + p.oe = p.oe[:i] + return true + } + return false +} + +// indexOfElementInScope returns the index in p.oe of the highest element whose +// tag is in matchTags that is in scope. If no matching element is in scope, it +// returns -1. +func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + if p.oe[i].Namespace == "" { + for _, t := range matchTags { + if t == tagAtom { + return i + } + } + switch s { + case defaultScope: + // No-op. 
+ case listItemScope: + if tagAtom == a.Ol || tagAtom == a.Ul { + return -1 + } + case buttonScope: + if tagAtom == a.Button { + return -1 + } + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { + return -1 + } + case selectScope: + if tagAtom != a.Optgroup && tagAtom != a.Option { + return -1 + } + default: + panic("unreachable") + } + } + switch s { + case defaultScope, listItemScope, buttonScope: + for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { + if t == tagAtom { + return -1 + } + } + } + } + return -1 +} + +// elementInScope is like popUntil, except that it doesn't modify the stack of +// open elements. +func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { + return p.indexOfElementInScope(s, matchTags...) != -1 +} + +// clearStackToContext pops elements off the stack of open elements until a +// scope-defined element is found. +func (p *parser) clearStackToContext(s scope) { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + switch s { + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { + p.oe = p.oe[:i+1] + return + } + case tableRowScope: + if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template { + p.oe = p.oe[:i+1] + return + } + case tableBodyScope: + if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template { + p.oe = p.oe[:i+1] + return + } + default: + panic("unreachable") + } + } +} + +// generateImpliedEndTags pops nodes off the stack of open elements as long as +// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. +// If exceptions are specified, nodes with that name will not be popped off. +func (p *parser) generateImpliedEndTags(exceptions ...string) { + var i int +loop: + for i = len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Type == ElementNode { + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: + for _, except := range exceptions { + if n.Data == except { + break loop + } + } + continue + } + } + break + } + + p.oe = p.oe[:i+1] +} + +// addChild adds a child node n to the top element, and pushes n onto the stack +// of open elements if it is an element node. +func (p *parser) addChild(n *Node) { + if p.shouldFosterParent() { + p.fosterParent(n) + } else { + p.top().AppendChild(n) + } + + if n.Type == ElementNode { + p.oe = append(p.oe, n) + } +} + +// shouldFosterParent returns whether the next node to be added should be +// foster parented. +func (p *parser) shouldFosterParent() bool { + if p.fosterParenting { + switch p.top().DataAtom { + case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: + return true + } + } + return false +} + +// fosterParent adds a child node according to the foster parenting rules. +// Section 12.2.6.1, "foster parenting". +func (p *parser) fosterParent(n *Node) { + var table, parent, prev, template *Node + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + if p.oe[i].DataAtom == a.Table { + table = p.oe[i] + break + } + } + + var j int + for j = len(p.oe) - 1; j >= 0; j-- { + if p.oe[j].DataAtom == a.Template { + template = p.oe[j] + break + } + } + + if template != nil && (table == nil || j > i) { + template.AppendChild(n) + return + } + + if table == nil { + // The foster parent is the html element. 
+ parent = p.oe[0] + } else { + parent = table.Parent + } + if parent == nil { + parent = p.oe[i-1] + } + + if table != nil { + prev = table.PrevSibling + } else { + prev = parent.LastChild + } + if prev != nil && prev.Type == TextNode && n.Type == TextNode { + prev.Data += n.Data + return + } + + parent.InsertBefore(n, table) +} + +// addText adds text to the preceding node if it is a text node, or else it +// calls addChild with a new text node. +func (p *parser) addText(text string) { + if text == "" { + return + } + + if p.shouldFosterParent() { + p.fosterParent(&Node{ + Type: TextNode, + Data: text, + }) + return + } + + t := p.top() + if n := t.LastChild; n != nil && n.Type == TextNode { + n.Data += text + return + } + p.addChild(&Node{ + Type: TextNode, + Data: text, + }) +} + +// addElement adds a child element based on the current token. +func (p *parser) addElement() { + p.addChild(&Node{ + Type: ElementNode, + DataAtom: p.tok.DataAtom, + Data: p.tok.Data, + Attr: p.tok.Attr, + }) +} + +// Section 12.2.4.3. +func (p *parser) addFormattingElement() { + tagAtom, attr := p.tok.DataAtom, p.tok.Attr + p.addElement() + + // Implement the Noah's Ark clause, but with three per family instead of two. + identicalElements := 0 +findIdenticalElements: + for i := len(p.afe) - 1; i >= 0; i-- { + n := p.afe[i] + if n.Type == scopeMarkerNode { + break + } + if n.Type != ElementNode { + continue + } + if n.Namespace != "" { + continue + } + if n.DataAtom != tagAtom { + continue + } + if len(n.Attr) != len(attr) { + continue + } + compareAttributes: + for _, t0 := range n.Attr { + for _, t1 := range attr { + if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { + // Found a match for this attribute, continue with the next attribute. + continue compareAttributes + } + } + // If we get here, there is no attribute that matches a. + // Therefore the element is not identical to the new one. + continue findIdenticalElements + } + + identicalElements++ + if identicalElements >= 3 { + p.afe.remove(n) + } + } + + p.afe = append(p.afe, p.top()) +} + +// Section 12.2.4.3. +func (p *parser) clearActiveFormattingElements() { + for { + n := p.afe.pop() + if len(p.afe) == 0 || n.Type == scopeMarkerNode { + return + } + } +} + +// Section 12.2.4.3. +func (p *parser) reconstructActiveFormattingElements() { + n := p.afe.top() + if n == nil { + return + } + if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { + return + } + i := len(p.afe) - 1 + for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { + if i == 0 { + i = -1 + break + } + i-- + n = p.afe[i] + } + for { + i++ + clone := p.afe[i].clone() + p.addChild(clone) + p.afe[i] = clone + if i == len(p.afe)-1 { + break + } + } +} + +// Section 12.2.5. +func (p *parser) acknowledgeSelfClosingTag() { + p.hasSelfClosingToken = false +} + +// An insertion mode (section 12.2.4.1) is the state transition function from +// a particular state in the HTML5 parser's state machine. It updates the +// parser's fields depending on parser.tok (where ErrorToken means EOF). +// It returns whether the token was consumed. +type insertionMode func(*parser) bool + +// setOriginalIM sets the insertion mode to return to after completing a text or +// inTableText insertion mode. +// Section 12.2.4.1, "using the rules for". +func (p *parser) setOriginalIM() { + if p.originalIM != nil { + panic("html: bad parser state: originalIM was set twice") + } + p.originalIM = p.im +} + +// Section 12.2.4.1, "reset the insertion mode". 
+func (p *parser) resetInsertionMode() { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + last := i == 0 + if last && p.context != nil { + n = p.context + } + + switch n.DataAtom { + case a.Select: + if !last { + for ancestor, first := n, p.oe[0]; ancestor != first; { + ancestor = p.oe[p.oe.index(ancestor)-1] + switch ancestor.DataAtom { + case a.Template: + p.im = inSelectIM + return + case a.Table: + p.im = inSelectInTableIM + return + } + } + } + p.im = inSelectIM + case a.Td, a.Th: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.im = inCellIM + case a.Tr: + p.im = inRowIM + case a.Tbody, a.Thead, a.Tfoot: + p.im = inTableBodyIM + case a.Caption: + p.im = inCaptionIM + case a.Colgroup: + p.im = inColumnGroupIM + case a.Table: + p.im = inTableIM + case a.Template: + // TODO: remove this divergence from the HTML5 spec. + if n.Namespace != "" { + continue + } + p.im = p.templateStack.top() + case a.Head: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.im = inHeadIM + case a.Body: + p.im = inBodyIM + case a.Frameset: + p.im = inFramesetIM + case a.Html: + if p.head == nil { + p.im = beforeHeadIM + } else { + p.im = afterHeadIM + } + default: + if last { + p.im = inBodyIM + return + } + continue + } + return + } +} + +const whitespace = " \t\r\n\f" + +// Section 12.2.6.4.1. +func initialIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + n, quirks := parseDoctype(p.tok.Data) + p.doc.AppendChild(n) + p.quirks = quirks + p.im = beforeHTMLIM + return true + } + p.quirks = true + p.im = beforeHTMLIM + return false +} + +// Section 12.2.6.4.2. +func beforeHTMLIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + if p.tok.DataAtom == a.Html { + p.addElement() + p.im = beforeHeadIM + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + } + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false +} + +// Section 12.2.6.4.3. +func beforeHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Head: + p.addElement() + p.head = p.top() + p.im = inHeadIM + return true + case a.Html: + return inBodyIM(p) + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. 
+ return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.4. +func inHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: + p.addElement() + p.oe.pop() + p.acknowledgeSelfClosingTag() + return true + case a.Noscript: + p.addElement() + if p.scripting { + p.setOriginalIM() + p.im = textIM + } else { + p.im = inHeadNoscriptIM + } + return true + case a.Script, a.Title, a.Noframes, a.Style: + p.addElement() + p.setOriginalIM() + p.im = textIM + return true + case a.Head: + // Ignore the token. + return true + case a.Template: + p.addElement() + p.afe = append(p.afe, &scopeMarker) + p.framesetOK = false + p.im = inTemplateIM + p.templateStack = append(p.templateStack, inTemplateIM) + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head: + p.oe.pop() + p.im = afterHeadIM + return true + case a.Body, a.Html, a.Br: + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false + case a.Template: + if !p.oe.contains(a.Template) { + return true + } + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.generateImpliedEndTags() + for i := len(p.oe) - 1; i >= 0; i-- { + if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template { + p.oe = p.oe[:i] + break + } + } + p.clearActiveFormattingElements() + p.templateStack.pop() + p.resetInsertionMode() + return true + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false +} + +// 12.2.6.4.5. +func inHeadNoscriptIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style: + return inHeadIM(p) + case a.Head, a.Noscript: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Noscript, a.Br: + default: + // Ignore the token. + return true + } + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) == 0 { + // It was all whitespace. + return inHeadIM(p) + } + case CommentToken: + return inHeadIM(p) + } + p.oe.pop() + if p.top().DataAtom != a.Head { + panic("html: the new current node will be a head element.") + } + p.im = inHeadIM + if p.tok.DataAtom == a.Noscript { + return true + } + return false +} + +// Section 12.2.6.4.6. +func afterHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. 
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Body: + p.addElement() + p.framesetOK = false + p.im = inBodyIM + return true + case a.Frameset: + p.addElement() + p.im = inFramesetIM + return true + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: + p.oe = append(p.oe, p.head) + defer p.oe.remove(p.head) + return inHeadIM(p) + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Body, a.Html, a.Br: + // Drop down to creating an implied tag. + case a.Template: + return inHeadIM(p) + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) + p.framesetOK = true + return false +} + +// copyAttributes copies attributes of src not found on dst to dst. +func copyAttributes(dst *Node, src Token) { + if len(src.Attr) == 0 { + return + } + attr := map[string]string{} + for _, t := range dst.Attr { + attr[t.Key] = t.Val + } + for _, t := range src.Attr { + if _, ok := attr[t.Key]; !ok { + dst.Attr = append(dst.Attr, t) + attr[t.Key] = t.Val + } + } +} + +// Section 12.2.6.4.7. +func inBodyIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + d := p.tok.Data + switch n := p.oe.top(); n.DataAtom { + case a.Pre, a.Listing: + if n.FirstChild == nil { + // Ignore a newline at the start of a
<pre> block.
+				if d != "" && d[0] == '\r' {
+					d = d[1:]
+				}
+				if d != "" && d[0] == '\n' {
+					d = d[1:]
+				}
+			}
+		}
+		d = strings.Replace(d, "\x00", "", -1)
+		if d == "" {
+			return true
+		}
+		p.reconstructActiveFormattingElements()
+		p.addText(d)
+		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+			// There were non-whitespace characters inserted.
+			p.framesetOK = false
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
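+			// A repeated <html> start tag never opens a second root element;
+			// at most it contributes attributes (below) that the root does
+			// not already have.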
+			if p.oe.contains(a.Template) {
+				return true
+			}
+			copyAttributes(p.oe[0], p.tok)
+		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
+			return inHeadIM(p)
+		case a.Body:
+			if p.oe.contains(a.Template) {
+				return true
+			}
+			if len(p.oe) >= 2 {
+				body := p.oe[1]
+				if body.Type == ElementNode && body.DataAtom == a.Body {
+					p.framesetOK = false
+					copyAttributes(body, p.tok)
+				}
+			}
+		case a.Frameset:
+			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+				// Ignore the token.
+				return true
+			}
+			body := p.oe[1]
+			if body.Parent != nil {
+				body.Parent.RemoveChild(body)
+			}
+			p.oe = p.oe[:1]
+			p.addElement()
+			p.im = inFramesetIM
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
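+			// A heading start tag implicitly closes any open heading, so
+			// input like "<h1>a<h2>b" yields sibling <h1> and <h2> elements
+			// rather than nested ones.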
+			p.popUntil(buttonScope, a.P)
+			switch n := p.top(); n.DataAtom {
+			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Pre, a.Listing:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			// The newline, if any, will be dealt with by the TextToken case.
+			p.framesetOK = false
+		case a.Form:
+			if p.form != nil && !p.oe.contains(a.Template) {
+				// Ignore the token.
+				return true
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			if !p.oe.contains(a.Template) {
+				p.form = p.top()
+			}
+		case a.Li:
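+			// An <li> start tag implicitly ends an open <li>, so
+			// "<ul><li>a<li>b" produces two sibling list items; the loop
+			// below pops back to (and including) the previous li.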
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Li:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Dd, a.Dt:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Dd, a.Dt:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Plaintext:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Button:
+			p.popUntil(defaultScope, a.Button)
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+		case a.A:
+			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+					p.inBodyEndTagFormatting(a.A, "a")
+					p.oe.remove(n)
+					p.afe.remove(n)
+					break
+				}
+			}
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.Nobr:
+			p.reconstructActiveFormattingElements()
+			if p.elementInScope(defaultScope, a.Nobr) {
+				p.inBodyEndTagFormatting(a.Nobr, "nobr")
+				p.reconstructActiveFormattingElements()
+			}
+			p.addFormattingElement()
+		case a.Applet, a.Marquee, a.Object:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.afe = append(p.afe, &scopeMarker)
+			p.framesetOK = false
+		case a.Table:
+			if !p.quirks {
+				p.popUntil(buttonScope, a.P)
+			}
+			p.addElement()
+			p.framesetOK = false
+			p.im = inTableIM
+			return true
+		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
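+			// Void elements: each is added and immediately popped, since a
+			// void element can never contain children.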
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			if p.tok.DataAtom == a.Input {
+				for _, t := range p.tok.Attr {
+					if t.Key == "type" {
+						if strings.ToLower(t.Val) == "hidden" {
+							// Skip setting framesetOK = false
+							return true
+						}
+					}
+				}
+			}
+			p.framesetOK = false
+		case a.Param, a.Source, a.Track:
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		case a.Hr:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			p.framesetOK = false
+		case a.Image:
+			p.tok.DataAtom = a.Img
+			p.tok.Data = a.Img.String()
+			return false
+		case a.Isindex:
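+			// The legacy <isindex> tag is rewritten into equivalent form
+			// markup, roughly:
+			//   <form><hr><label>prompt<input name="isindex"></label><hr></form>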
+			if p.form != nil {
+				// Ignore the token.
+				return true
+			}
+			action := ""
+			prompt := "This is a searchable index. Enter search keywords: "
+			attr := []Attribute{{Key: "name", Val: "isindex"}}
+			for _, t := range p.tok.Attr {
+				switch t.Key {
+				case "action":
+					action = t.Val
+				case "name":
+					// Ignore the attribute.
+				case "prompt":
+					prompt = t.Val
+				default:
+					attr = append(attr, t)
+				}
+			}
+			p.acknowledgeSelfClosingTag()
+			p.popUntil(buttonScope, a.P)
+			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+			if p.form == nil {
+				// NOTE: The 'isindex' element has been removed,
+				// and the 'template' element has not been designed to be
+				// collaborative with the index element.
+				//
+				// Ignore the token.
+				return true
+			}
+			if action != "" {
+				p.form.Attr = []Attribute{{Key: "action", Val: action}}
+			}
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
+			p.addText(prompt)
+			p.addChild(&Node{
+				Type:     ElementNode,
+				DataAtom: a.Input,
+				Data:     a.Input.String(),
+				Attr:     attr,
+			})
+			p.oe.pop()
+			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
+		case a.Textarea:
+			p.addElement()
+			p.setOriginalIM()
+			p.framesetOK = false
+			p.im = textIM
+		case a.Xmp:
+			p.popUntil(buttonScope, a.P)
+			p.reconstructActiveFormattingElements()
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Iframe:
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Noembed, a.Noscript:
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectIM
+			return true
+		case a.Optgroup, a.Option:
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		case a.Rb, a.Rtc:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags()
+			}
+			p.addElement()
+		case a.Rp, a.Rt:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags("rtc")
+			}
+			p.addElement()
+		case a.Math, a.Svg:
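+			// <math> and <svg> open foreign content: attribute names get
+			// their case-sensitive spellings restored, and the new element
+			// carries the "math" or "svg" namespace (p.tok.Data below).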
+			p.reconstructActiveFormattingElements()
+			if p.tok.DataAtom == a.Math {
+				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+			} else {
+				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+			}
+			adjustForeignAttributes(p.tok.Attr)
+			p.addElement()
+			p.top().Namespace = p.tok.Data
+			if p.hasSelfClosingToken {
+				p.oe.pop()
+				p.acknowledgeSelfClosingTag()
+			}
+			return true
+		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+		default:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Body:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.im = afterBodyIM
+			}
+		case a.Html:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+				return false
+			}
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.Form:
+			if p.oe.contains(a.Template) {
+				i := p.indexOfElementInScope(defaultScope, a.Form)
+				if i == -1 {
+					// Ignore the token.
+					return true
+				}
+				p.generateImpliedEndTags()
+				if p.oe[i].DataAtom != a.Form {
+					// Ignore the token.
+					return true
+				}
+				p.popUntil(defaultScope, a.Form)
+			} else {
+				node := p.form
+				p.form = nil
+				i := p.indexOfElementInScope(defaultScope, a.Form)
+				if node == nil || i == -1 || p.oe[i] != node {
+					// Ignore the token.
+					return true
+				}
+				p.generateImpliedEndTags()
+				p.oe.remove(node)
+			}
+		case a.P:
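+			// A stray </p> with no open <p> in button scope first
+			// synthesizes one, so "</p>" on its own still produces an empty
+			// <p> element.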
+			if !p.elementInScope(buttonScope, a.P) {
+				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+			}
+			p.popUntil(buttonScope, a.P)
+		case a.Li:
+			p.popUntil(listItemScope, a.Li)
+		case a.Dd, a.Dt:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.inBodyEndTagFormatting(p.tok.DataAtom, p.tok.Data)
+		case a.Applet, a.Marquee, a.Object:
+			if p.popUntil(defaultScope, p.tok.DataAtom) {
+				p.clearActiveFormattingElements()
+			}
+		case a.Br:
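+			// Per the spec, a stray </br> end tag is reprocessed as a <br>
+			// start tag.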
+			p.tok.Type = StartTagToken
+			return false
+		case a.Template:
+			return inHeadIM(p)
+		default:
+			p.inBodyEndTagOther(p.tok.DataAtom, p.tok.Data)
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	case ErrorToken:
+		// TODO: remove this divergence from the HTML5 spec.
+		if len(p.templateStack) > 0 {
+			p.im = inTemplateIM
+			return false
+		} else {
+			for _, e := range p.oe {
+				switch e.DataAtom {
+				case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th,
+					a.Thead, a.Tr, a.Body, a.Html:
+				default:
+					return true
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
+	// This is the "adoption agency" algorithm, described at
+	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+	// TODO: this is a fairly literal line-by-line translation of that algorithm.
+	// Once the code successfully parses the comprehensive test suite, we should
+	// refactor this code to be more idiomatic.
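+	//
+	// As a concrete example, misnested input like "1<b>2<p>3</b>4" is
+	// rebuilt as "1<b>2</b><p><b>3</b>4</p>": the <b> is closed before the
+	// paragraph, and a clone of it is wrapped around "3".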
+
+	// Steps 1-4. The outer loop.
+	for i := 0; i < 8; i++ {
+		// Step 5. Find the formatting element.
+		var formattingElement *Node
+		for j := len(p.afe) - 1; j >= 0; j-- {
+			if p.afe[j].Type == scopeMarkerNode {
+				break
+			}
+			if p.afe[j].DataAtom == tagAtom {
+				formattingElement = p.afe[j]
+				break
+			}
+		}
+		if formattingElement == nil {
+			p.inBodyEndTagOther(tagAtom, tagName)
+			return
+		}
+		feIndex := p.oe.index(formattingElement)
+		if feIndex == -1 {
+			p.afe.remove(formattingElement)
+			return
+		}
+		if !p.elementInScope(defaultScope, tagAtom) {
+			// Ignore the tag.
+			return
+		}
+
+		// Steps 9-10. Find the furthest block.
+		var furthestBlock *Node
+		for _, e := range p.oe[feIndex:] {
+			if isSpecialElement(e) {
+				furthestBlock = e
+				break
+			}
+		}
+		if furthestBlock == nil {
+			e := p.oe.pop()
+			for e != formattingElement {
+				e = p.oe.pop()
+			}
+			p.afe.remove(e)
+			return
+		}
+
+		// Steps 11-12. Find the common ancestor and bookmark node.
+		commonAncestor := p.oe[feIndex-1]
+		bookmark := p.afe.index(formattingElement)
+
+		// Step 13. The inner loop. Find the lastNode to reparent.
+		lastNode := furthestBlock
+		node := furthestBlock
+		x := p.oe.index(node)
+		// Steps 13.1-13.2
+		for j := 0; j < 3; j++ {
+			// Step 13.3.
+			x--
+			node = p.oe[x]
+			// Step 13.4 - 13.5.
+			if p.afe.index(node) == -1 {
+				p.oe.remove(node)
+				continue
+			}
+			// Step 13.6.
+			if node == formattingElement {
+				break
+			}
+			// Step 13.7.
+			clone := node.clone()
+			p.afe[p.afe.index(node)] = clone
+			p.oe[p.oe.index(node)] = clone
+			node = clone
+			// Step 13.8.
+			if lastNode == furthestBlock {
+				bookmark = p.afe.index(node) + 1
+			}
+			// Step 13.9.
+			if lastNode.Parent != nil {
+				lastNode.Parent.RemoveChild(lastNode)
+			}
+			node.AppendChild(lastNode)
+			// Step 13.10.
+			lastNode = node
+		}
+
+		// Step 14. Reparent lastNode to the common ancestor,
+		// or for misnested table nodes, to the foster parent.
+		if lastNode.Parent != nil {
+			lastNode.Parent.RemoveChild(lastNode)
+		}
+		switch commonAncestor.DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			p.fosterParent(lastNode)
+		default:
+			commonAncestor.AppendChild(lastNode)
+		}
+
+		// Steps 15-17. Reparent nodes from the furthest block's children
+		// to a clone of the formatting element.
+		clone := formattingElement.clone()
+		reparentChildren(clone, furthestBlock)
+		furthestBlock.AppendChild(clone)
+
+		// Step 18. Fix up the list of active formatting elements.
+		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+			// Move the bookmark with the rest of the list.
+			bookmark--
+		}
+		p.afe.remove(formattingElement)
+		p.afe.insert(bookmark, clone)
+
+		// Step 19. Fix up the stack of open elements.
+		p.oe.remove(formattingElement)
+		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+	}
+}
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom, tagName string) {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		// Two element nodes have the same tag if they have the same Data (a
+		// string-typed field). As an optimization, for common HTML tags, each
+		// Data string is assigned a unique, non-zero DataAtom (a uint32-typed
+		// field), since integer comparison is faster than string comparison.
+		// Uncommon (custom) tags get a zero DataAtom.
+		//
+		// The if condition here is equivalent to (p.oe[i].Data == tagName).
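+		// For example, a </div> end tag is matched purely by the integer
+		// DataAtom comparison, while an unknown tag like </my-widget> has a
+		// zero DataAtom and falls back to comparing the Data strings.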
+		if (p.oe[i].DataAtom == tagAtom) &&
+			((tagAtom != 0) || (p.oe[i].Data == tagName)) {
+			p.oe = p.oe[:i]
+			break
+		}
+		if isSpecialElement(p.oe[i]) {
+			break
+		}
+	}
+}
+
+// Section 12.2.6.4.8.
+func textIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		p.oe.pop()
+	case TextToken:
+		d := p.tok.Data
+		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+			// Ignore a newline at the start of a <textarea>