Merge pull request #22643 from hashicorp/version-tools
Version tools per Go convention under tools.go
This commit is contained in:
commit
ec90bed62d
|
@ -149,17 +149,6 @@ Some files in the Terraform CLI codebase are generated. In most cases, we
|
||||||
update these using `go generate`, which is the standard way to encapsulate
|
update these using `go generate`, which is the standard way to encapsulate
|
||||||
code generation steps in a Go codebase.
|
code generation steps in a Go codebase.
|
||||||
|
|
||||||
However, code generation often relies on external tools which must be correctly
|
|
||||||
installed before running `go generate`. Terraform's `Makefile` includes
|
|
||||||
a target to install the tools needed for `go generate`:
|
|
||||||
|
|
||||||
```
|
|
||||||
make tools
|
|
||||||
```
|
|
||||||
|
|
||||||
After the tools are installed successfully, you can run code generation for
|
|
||||||
all packages:
|
|
||||||
|
|
||||||
```
|
```
|
||||||
go generate ./...
|
go generate ./...
|
||||||
```
|
```
|
||||||
|
|
12
Makefile
12
Makefile
|
@ -5,11 +5,6 @@ WEBSITE_REPO=github.com/hashicorp/terraform-website
|
||||||
|
|
||||||
default: test
|
default: test
|
||||||
|
|
||||||
tools:
|
|
||||||
GO111MODULE=off go get -u golang.org/x/tools/cmd/stringer
|
|
||||||
GO111MODULE=off go get -u golang.org/x/tools/cmd/cover
|
|
||||||
GO111MODULE=off go get -u github.com/golang/mock/mockgen
|
|
||||||
|
|
||||||
# bin generates the releaseable binaries for Terraform
|
# bin generates the releaseable binaries for Terraform
|
||||||
bin: fmtcheck generate
|
bin: fmtcheck generate
|
||||||
@TF_RELEASE=1 sh -c "'$(CURDIR)/scripts/build.sh'"
|
@TF_RELEASE=1 sh -c "'$(CURDIR)/scripts/build.sh'"
|
||||||
|
@ -62,9 +57,6 @@ testrace: fmtcheck generate
|
||||||
TF_ACC= go test -mod=vendor -race $(TEST) $(TESTARGS)
|
TF_ACC= go test -mod=vendor -race $(TEST) $(TESTARGS)
|
||||||
|
|
||||||
cover:
|
cover:
|
||||||
@go tool cover 2>/dev/null; if [ $$? -eq 3 ]; then \
|
|
||||||
go get -u golang.org/x/tools/cmd/cover; \
|
|
||||||
fi
|
|
||||||
go test $(TEST) -coverprofile=coverage.out
|
go test $(TEST) -coverprofile=coverage.out
|
||||||
go tool cover -html=coverage.out
|
go tool cover -html=coverage.out
|
||||||
rm coverage.out
|
rm coverage.out
|
||||||
|
@ -72,7 +64,7 @@ cover:
|
||||||
# generate runs `go generate` to build the dynamically generated
|
# generate runs `go generate` to build the dynamically generated
|
||||||
# source files, except the protobuf stubs which are built instead with
|
# source files, except the protobuf stubs which are built instead with
|
||||||
# "make protobuf".
|
# "make protobuf".
|
||||||
generate: tools
|
generate:
|
||||||
GOFLAGS=-mod=vendor go generate ./...
|
GOFLAGS=-mod=vendor go generate ./...
|
||||||
# go fmt doesn't support -mod=vendor but it still wants to populate the
|
# go fmt doesn't support -mod=vendor but it still wants to populate the
|
||||||
# module cache with everything in go.mod even though formatting requires
|
# module cache with everything in go.mod even though formatting requires
|
||||||
|
@ -147,4 +139,4 @@ endif
|
||||||
# under parallel conditions.
|
# under parallel conditions.
|
||||||
.NOTPARALLEL:
|
.NOTPARALLEL:
|
||||||
|
|
||||||
.PHONY: bin cover default dev e2etest fmt fmtcheck generate protobuf plugin-dev quickdev test-compile test testacc testrace tools vendor-status website website-test
|
.PHONY: bin cover default dev e2etest fmt fmtcheck generate protobuf plugin-dev quickdev test-compile test testacc testrace vendor-status website website-test
|
||||||
|
|
|
@ -253,7 +253,7 @@ func (r AbsResourceInstance) Less(o AbsResourceInstance) bool {
|
||||||
// resource lifecycle has a slightly different address format.
|
// resource lifecycle has a slightly different address format.
|
||||||
type ResourceMode rune
|
type ResourceMode rune
|
||||||
|
|
||||||
//go:generate stringer -type ResourceMode
|
//go:generate go run golang.org/x/tools/cmd/stringer -type ResourceMode
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// InvalidResourceMode is the zero value of ResourceMode and is not
|
// InvalidResourceMode is the zero value of ResourceMode and is not
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
package local
|
package local
|
||||||
|
|
||||||
//go:generate stringer -type=countHookAction hook_count_action.go
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=countHookAction hook_count_action.go
|
||||||
|
|
||||||
type countHookAction byte
|
type countHookAction byte
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
package backend
|
package backend
|
||||||
|
|
||||||
//go:generate stringer -type=OperationType operation_type.go
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=OperationType operation_type.go
|
||||||
|
|
||||||
// OperationType is an enum used with Operation to specify the operation
|
// OperationType is an enum used with Operation to specify the operation
|
||||||
// type to perform for Terraform.
|
// type to perform for Terraform.
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
package config
|
package config
|
||||||
|
|
||||||
//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
|
||||||
type ResourceMode int
|
type ResourceMode int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
|
@ -83,7 +83,7 @@ type NestedBlock struct {
|
||||||
// blocks.
|
// blocks.
|
||||||
type NestingMode int
|
type NestingMode int
|
||||||
|
|
||||||
//go:generate stringer -type=NestingMode
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode
|
||||||
|
|
||||||
const (
|
const (
|
||||||
nestingModeInvalid NestingMode = iota
|
nestingModeInvalid NestingMode = iota
|
||||||
|
|
|
@ -118,7 +118,7 @@ type Connection struct {
|
||||||
// ProvisionerWhen is an enum for valid values for when to run provisioners.
|
// ProvisionerWhen is an enum for valid values for when to run provisioners.
|
||||||
type ProvisionerWhen int
|
type ProvisionerWhen int
|
||||||
|
|
||||||
//go:generate stringer -type ProvisionerWhen
|
//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerWhen
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ProvisionerWhenInvalid ProvisionerWhen = iota
|
ProvisionerWhenInvalid ProvisionerWhen = iota
|
||||||
|
@ -130,7 +130,7 @@ const (
|
||||||
// for provisioners.
|
// for provisioners.
|
||||||
type ProvisionerOnFailure int
|
type ProvisionerOnFailure int
|
||||||
|
|
||||||
//go:generate stringer -type ProvisionerOnFailure
|
//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerOnFailure
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
|
ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
|
||||||
|
|
|
@ -19,7 +19,7 @@ package configs
|
||||||
// TypeHintMap requires a type that could be converted to an object
|
// TypeHintMap requires a type that could be converted to an object
|
||||||
type VariableTypeHint rune
|
type VariableTypeHint rune
|
||||||
|
|
||||||
//go:generate stringer -type VariableTypeHint
|
//go:generate go run golang.org/x/tools/cmd/stringer -type VariableTypeHint
|
||||||
|
|
||||||
// TypeHintNone indicates the absence of a type hint. Values specified in
|
// TypeHintNone indicates the absence of a type hint. Values specified in
|
||||||
// ambiguous contexts will be treated as literal strings, as if TypeHintString
|
// ambiguous contexts will be treated as literal strings, as if TypeHintString
|
||||||
|
|
1
go.mod
1
go.mod
|
@ -129,6 +129,7 @@ require (
|
||||||
golang.org/x/net v0.0.0-20191009170851-d66e71096ffb
|
golang.org/x/net v0.0.0-20191009170851-d66e71096ffb
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
|
||||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa
|
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa
|
||||||
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0
|
||||||
google.golang.org/api v0.9.0
|
google.golang.org/api v0.9.0
|
||||||
google.golang.org/grpc v1.21.1
|
google.golang.org/grpc v1.21.1
|
||||||
gopkg.in/ini.v1 v1.42.0 // indirect
|
gopkg.in/ini.v1 v1.42.0 // indirect
|
||||||
|
|
1
go.sum
1
go.sum
|
@ -491,6 +491,7 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3
|
||||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE=
|
||||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
package schema
|
package schema
|
||||||
|
|
||||||
//go:generate stringer -type=getSource resource_data_get_source.go
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=getSource resource_data_get_source.go
|
||||||
|
|
||||||
// getSource represents the level we want to get for a value (internally).
|
// getSource represents the level we want to get for a value (internally).
|
||||||
// Any source less than or equal to the level will be loaded (whichever
|
// Any source less than or equal to the level will be loaded (whichever
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
package schema
|
package schema
|
||||||
|
|
||||||
//go:generate stringer -type=ValueType valuetype.go
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=ValueType valuetype.go
|
||||||
|
|
||||||
// ValueType is an enum of the type that can be represented by a schema.
|
// ValueType is an enum of the type that can be represented by a schema.
|
||||||
type ValueType int
|
type ValueType int
|
||||||
|
|
|
@ -12,7 +12,7 @@ const (
|
||||||
Delete Action = '-'
|
Delete Action = '-'
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate stringer -type Action
|
//go:generate go run golang.org/x/tools/cmd/stringer -type Action
|
||||||
|
|
||||||
// IsReplace returns true if the action is one of the two actions that
|
// IsReplace returns true if the action is one of the two actions that
|
||||||
// represents replacing an existing object with a new object:
|
// represents replacing an existing object with a new object:
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
//go:generate bash ./generate.sh
|
//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/hashicorp/terraform/internal/tfplugin5 ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer
|
||||||
|
|
||||||
package mock_tfplugin5
|
package mock_tfplugin5
|
||||||
|
|
|
@ -1,7 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# mockgen is particularly sensitive about what mode we run it in
|
|
||||||
export GOFLAGS=""
|
|
||||||
export GO111MODULE=on
|
|
||||||
|
|
||||||
mockgen -destination mock.go github.com/hashicorp/terraform/internal/tfplugin5 ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer
|
|
|
@ -40,7 +40,7 @@ type ResourceInstanceObject struct {
|
||||||
// ObjectStatus represents the status of a RemoteObject.
|
// ObjectStatus represents the status of a RemoteObject.
|
||||||
type ObjectStatus rune
|
type ObjectStatus rune
|
||||||
|
|
||||||
//go:generate stringer -type ObjectStatus
|
//go:generate go run golang.org/x/tools/cmd/stringer -type ObjectStatus
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// ObjectReady is an object status for an object that is ready to use.
|
// ObjectReady is an object status for an object that is ready to use.
|
||||||
|
|
|
@ -175,7 +175,7 @@ const (
|
||||||
EachMap EachMode = 'M'
|
EachMap EachMode = 'M'
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate stringer -type EachMode
|
//go:generate go run golang.org/x/tools/cmd/stringer -type EachMode
|
||||||
|
|
||||||
func eachModeForInstanceKey(key addrs.InstanceKey) EachMode {
|
func eachModeForInstanceKey(key addrs.InstanceKey) EachMode {
|
||||||
switch key.(type) {
|
switch key.(type) {
|
||||||
|
|
|
@ -129,7 +129,7 @@ func Export(mgr Reader) *statefile.File {
|
||||||
// is the receiver of that method and the "second" is the given argument.
|
// is the receiver of that method and the "second" is the given argument.
|
||||||
type SnapshotMetaRel rune
|
type SnapshotMetaRel rune
|
||||||
|
|
||||||
//go:generate stringer -type=SnapshotMetaRel
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=SnapshotMetaRel
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// SnapshotOlder indicates that two snapshots have a common lineage and
|
// SnapshotOlder indicates that two snapshots have a common lineage and
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
package terraform
|
package terraform
|
||||||
|
|
||||||
//go:generate stringer -type=GraphType context_graph_type.go
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=GraphType context_graph_type.go
|
||||||
|
|
||||||
// GraphType is an enum of the type of graph to create with a Context.
|
// GraphType is an enum of the type of graph to create with a Context.
|
||||||
// The values of the constants may change so they shouldn't be depended on;
|
// The values of the constants may change so they shouldn't be depended on;
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
package terraform
|
package terraform
|
||||||
|
|
||||||
//go:generate stringer -type=walkOperation graph_walk_operation.go
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=walkOperation graph_walk_operation.go
|
||||||
|
|
||||||
// walkOperation is an enum which tells the walkContext what to do.
|
// walkOperation is an enum which tells the walkContext what to do.
|
||||||
type walkOperation byte
|
type walkOperation byte
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
package terraform
|
package terraform
|
||||||
|
|
||||||
//go:generate stringer -type=InstanceType instancetype.go
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=InstanceType instancetype.go
|
||||||
|
|
||||||
// InstanceType is an enum of the various types of instances store in the State
|
// InstanceType is an enum of the various types of instances store in the State
|
||||||
type InstanceType int
|
type InstanceType int
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
package terraform
|
package terraform
|
||||||
|
|
||||||
//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
|
||||||
|
|
||||||
// ResourceMode is deprecated, use addrs.ResourceMode instead.
|
// ResourceMode is deprecated, use addrs.ResourceMode instead.
|
||||||
// It has been preserved for backwards compatibility.
|
// It has been preserved for backwards compatibility.
|
||||||
|
|
|
@ -78,7 +78,7 @@ func (v ValueSourceType) GoString() string {
|
||||||
return fmt.Sprintf("terraform.%s", v)
|
return fmt.Sprintf("terraform.%s", v)
|
||||||
}
|
}
|
||||||
|
|
||||||
//go:generate stringer -type ValueSourceType
|
//go:generate go run golang.org/x/tools/cmd/stringer -type ValueSourceType
|
||||||
|
|
||||||
// InputValues is a map of InputValue instances.
|
// InputValues is a map of InputValue instances.
|
||||||
type InputValues map[string]*InputValue
|
type InputValues map[string]*InputValue
|
||||||
|
|
|
@ -17,7 +17,7 @@ type Diagnostic interface {
|
||||||
|
|
||||||
type Severity rune
|
type Severity rune
|
||||||
|
|
||||||
//go:generate stringer -type=Severity
|
//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity
|
||||||
|
|
||||||
const (
|
const (
|
||||||
Error Severity = 'E'
|
Error Severity = 'E'
|
||||||
|
|
|
@ -0,0 +1,9 @@
|
||||||
|
// +build tools
|
||||||
|
|
||||||
|
package tools
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "github.com/golang/mock/mockgen"
|
||||||
|
_ "golang.org/x/tools/cmd/cover"
|
||||||
|
_ "golang.org/x/tools/cmd/stringer"
|
||||||
|
)
|
|
@ -0,0 +1,588 @@
|
||||||
|
// Copyright 2010 Google Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// MockGen generates mock implementations of Go interfaces.
|
||||||
|
package main
|
||||||
|
|
||||||
|
// TODO: This does not support recursive embedded interfaces.
|
||||||
|
// TODO: This does not support embedding package-local interfaces in a separate file.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/build"
|
||||||
|
"go/format"
|
||||||
|
"go/token"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/golang/mock/mockgen/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
gomockImportPath = "github.com/golang/mock/gomock"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
source = flag.String("source", "", "(source mode) Input Go source file; enables source mode.")
|
||||||
|
destination = flag.String("destination", "", "Output file; defaults to stdout.")
|
||||||
|
mockNames = flag.String("mock_names", "", "Comma-separated interfaceName=mockName pairs of explicit mock names to use. Mock names default to 'Mock'+ interfaceName suffix.")
|
||||||
|
packageOut = flag.String("package", "", "Package of the generated code; defaults to the package of the input with a 'mock_' prefix.")
|
||||||
|
selfPackage = flag.String("self_package", "", "The full package import path for the generated code. The purpose of this flag is to prevent import cycles in the generated code by trying to include its own package. This can happen if the mock's package is set to one of its inputs (usually the main one) and the output is stdio so mockgen cannot detect the final output package. Setting this flag will then tell mockgen which import to exclude.")
|
||||||
|
writePkgComment = flag.Bool("write_package_comment", true, "Writes package documentation comment (godoc) if true.")
|
||||||
|
copyrightFile = flag.String("copyright_file", "", "Copyright file used to add copyright header")
|
||||||
|
|
||||||
|
debugParser = flag.Bool("debug_parser", false, "Print out parser results only.")
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Usage = usage
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
var pkg *model.Package
|
||||||
|
var err error
|
||||||
|
if *source != "" {
|
||||||
|
pkg, err = parseFile(*source)
|
||||||
|
} else {
|
||||||
|
if flag.NArg() != 2 {
|
||||||
|
usage()
|
||||||
|
log.Fatal("Expected exactly two arguments")
|
||||||
|
}
|
||||||
|
pkg, err = reflect(flag.Arg(0), strings.Split(flag.Arg(1), ","))
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Loading input failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if *debugParser {
|
||||||
|
pkg.Print(os.Stdout)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
dst := os.Stdout
|
||||||
|
if len(*destination) > 0 {
|
||||||
|
if err := os.MkdirAll(filepath.Dir(*destination), os.ModePerm); err != nil {
|
||||||
|
log.Fatalf("Unable to create directory: %v", err)
|
||||||
|
}
|
||||||
|
f, err := os.Create(*destination)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed opening destination file: %v", err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
dst = f
|
||||||
|
}
|
||||||
|
|
||||||
|
packageName := *packageOut
|
||||||
|
if packageName == "" {
|
||||||
|
// pkg.Name in reflect mode is the base name of the import path,
|
||||||
|
// which might have characters that are illegal to have in package names.
|
||||||
|
packageName = "mock_" + sanitize(pkg.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// outputPackagePath represents the fully qualified name of the package of
|
||||||
|
// the generated code. Its purposes are to prevent the module from importing
|
||||||
|
// itself and to prevent qualifying type names that come from its own
|
||||||
|
// package (i.e. if there is a type called X then we want to print "X" not
|
||||||
|
// "package.X" since "package" is this package). This can happen if the mock
|
||||||
|
// is output into an already existing package.
|
||||||
|
outputPackagePath := *selfPackage
|
||||||
|
if len(outputPackagePath) == 0 && len(*destination) > 0 {
|
||||||
|
dst, _ := filepath.Abs(filepath.Dir(*destination))
|
||||||
|
for _, prefix := range build.Default.SrcDirs() {
|
||||||
|
if strings.HasPrefix(dst, prefix) {
|
||||||
|
if rel, err := filepath.Rel(prefix, dst); err == nil {
|
||||||
|
outputPackagePath = rel
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
g := new(generator)
|
||||||
|
if *source != "" {
|
||||||
|
g.filename = *source
|
||||||
|
} else {
|
||||||
|
g.srcPackage = flag.Arg(0)
|
||||||
|
g.srcInterfaces = flag.Arg(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if *mockNames != "" {
|
||||||
|
g.mockNames = parseMockNames(*mockNames)
|
||||||
|
}
|
||||||
|
if *copyrightFile != "" {
|
||||||
|
header, err := ioutil.ReadFile(*copyrightFile)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed reading copyright file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
g.copyrightHeader = string(header)
|
||||||
|
}
|
||||||
|
if err := g.Generate(pkg, packageName, outputPackagePath); err != nil {
|
||||||
|
log.Fatalf("Failed generating mock: %v", err)
|
||||||
|
}
|
||||||
|
if _, err := dst.Write(g.Output()); err != nil {
|
||||||
|
log.Fatalf("Failed writing to destination: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseMockNames(names string) map[string]string {
|
||||||
|
mocksMap := make(map[string]string)
|
||||||
|
for _, kv := range strings.Split(names, ",") {
|
||||||
|
parts := strings.SplitN(kv, "=", 2)
|
||||||
|
if len(parts) != 2 || parts[1] == "" {
|
||||||
|
log.Fatalf("bad mock names spec: %v", kv)
|
||||||
|
}
|
||||||
|
mocksMap[parts[0]] = parts[1]
|
||||||
|
}
|
||||||
|
return mocksMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func usage() {
|
||||||
|
io.WriteString(os.Stderr, usageText)
|
||||||
|
flag.PrintDefaults()
|
||||||
|
}
|
||||||
|
|
||||||
|
const usageText = `mockgen has two modes of operation: source and reflect.
|
||||||
|
|
||||||
|
Source mode generates mock interfaces from a source file.
|
||||||
|
It is enabled by using the -source flag. Other flags that
|
||||||
|
may be useful in this mode are -imports and -aux_files.
|
||||||
|
Example:
|
||||||
|
mockgen -source=foo.go [other options]
|
||||||
|
|
||||||
|
Reflect mode generates mock interfaces by building a program
|
||||||
|
that uses reflection to understand interfaces. It is enabled
|
||||||
|
by passing two non-flag arguments: an import path, and a
|
||||||
|
comma-separated list of symbols.
|
||||||
|
Example:
|
||||||
|
mockgen database/sql/driver Conn,Driver
|
||||||
|
|
||||||
|
`
|
||||||
|
|
||||||
|
type generator struct {
|
||||||
|
buf bytes.Buffer
|
||||||
|
indent string
|
||||||
|
mockNames map[string]string // may be empty
|
||||||
|
filename string // may be empty
|
||||||
|
srcPackage, srcInterfaces string // may be empty
|
||||||
|
copyrightHeader string
|
||||||
|
|
||||||
|
packageMap map[string]string // map from import path to package name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *generator) p(format string, args ...interface{}) {
|
||||||
|
fmt.Fprintf(&g.buf, g.indent+format+"\n", args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *generator) in() {
|
||||||
|
g.indent += "\t"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *generator) out() {
|
||||||
|
if len(g.indent) > 0 {
|
||||||
|
g.indent = g.indent[0 : len(g.indent)-1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func removeDot(s string) string {
|
||||||
|
if len(s) > 0 && s[len(s)-1] == '.' {
|
||||||
|
return s[0 : len(s)-1]
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// sanitize cleans up a string to make a suitable package name.
|
||||||
|
func sanitize(s string) string {
|
||||||
|
t := ""
|
||||||
|
for _, r := range s {
|
||||||
|
if t == "" {
|
||||||
|
if unicode.IsLetter(r) || r == '_' {
|
||||||
|
t += string(r)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
|
||||||
|
t += string(r)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t += "_"
|
||||||
|
}
|
||||||
|
if t == "_" {
|
||||||
|
t = "x"
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *generator) Generate(pkg *model.Package, pkgName string, outputPackagePath string) error {
|
||||||
|
if pkgName != pkg.Name {
|
||||||
|
outputPackagePath = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
if g.copyrightHeader != "" {
|
||||||
|
lines := strings.Split(g.copyrightHeader, "\n")
|
||||||
|
for _, line := range lines {
|
||||||
|
g.p("// %s", line)
|
||||||
|
}
|
||||||
|
g.p("")
|
||||||
|
}
|
||||||
|
|
||||||
|
g.p("// Code generated by MockGen. DO NOT EDIT.")
|
||||||
|
if g.filename != "" {
|
||||||
|
g.p("// Source: %v", g.filename)
|
||||||
|
} else {
|
||||||
|
g.p("// Source: %v (interfaces: %v)", g.srcPackage, g.srcInterfaces)
|
||||||
|
}
|
||||||
|
g.p("")
|
||||||
|
|
||||||
|
// Get all required imports, and generate unique names for them all.
|
||||||
|
im := pkg.Imports()
|
||||||
|
im[gomockImportPath] = true
|
||||||
|
|
||||||
|
// Only import reflect if it's used. We only use reflect in mocked methods
|
||||||
|
// so only import if any of the mocked interfaces have methods.
|
||||||
|
for _, intf := range pkg.Interfaces {
|
||||||
|
if len(intf.Methods) > 0 {
|
||||||
|
im["reflect"] = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort keys to make import alias generation predictable
|
||||||
|
sortedPaths := make([]string, len(im), len(im))
|
||||||
|
x := 0
|
||||||
|
for pth := range im {
|
||||||
|
sortedPaths[x] = pth
|
||||||
|
x++
|
||||||
|
}
|
||||||
|
sort.Strings(sortedPaths)
|
||||||
|
|
||||||
|
g.packageMap = make(map[string]string, len(im))
|
||||||
|
localNames := make(map[string]bool, len(im))
|
||||||
|
for _, pth := range sortedPaths {
|
||||||
|
base := sanitize(path.Base(pth))
|
||||||
|
|
||||||
|
// Local names for an imported package can usually be the basename of the import path.
|
||||||
|
// A couple of situations don't permit that, such as duplicate local names
|
||||||
|
// (e.g. importing "html/template" and "text/template"), or where the basename is
|
||||||
|
// a keyword (e.g. "foo/case").
|
||||||
|
// try base0, base1, ...
|
||||||
|
pkgName := base
|
||||||
|
i := 0
|
||||||
|
for localNames[pkgName] || token.Lookup(pkgName).IsKeyword() {
|
||||||
|
pkgName = base + strconv.Itoa(i)
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
|
||||||
|
g.packageMap[pth] = pkgName
|
||||||
|
localNames[pkgName] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if *writePkgComment {
|
||||||
|
g.p("// Package %v is a generated GoMock package.", pkgName)
|
||||||
|
}
|
||||||
|
g.p("package %v", pkgName)
|
||||||
|
g.p("")
|
||||||
|
g.p("import (")
|
||||||
|
g.in()
|
||||||
|
for path, pkg := range g.packageMap {
|
||||||
|
if path == outputPackagePath {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
g.p("%v %q", pkg, path)
|
||||||
|
}
|
||||||
|
for _, path := range pkg.DotImports {
|
||||||
|
g.p(". %q", path)
|
||||||
|
}
|
||||||
|
g.out()
|
||||||
|
g.p(")")
|
||||||
|
|
||||||
|
for _, intf := range pkg.Interfaces {
|
||||||
|
if err := g.GenerateMockInterface(intf, outputPackagePath); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The name of the mock type to use for the given interface identifier.
|
||||||
|
func (g *generator) mockName(typeName string) string {
|
||||||
|
if mockName, ok := g.mockNames[typeName]; ok {
|
||||||
|
return mockName
|
||||||
|
}
|
||||||
|
|
||||||
|
return "Mock" + typeName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePath string) error {
|
||||||
|
mockType := g.mockName(intf.Name)
|
||||||
|
|
||||||
|
g.p("")
|
||||||
|
g.p("// %v is a mock of %v interface", mockType, intf.Name)
|
||||||
|
g.p("type %v struct {", mockType)
|
||||||
|
g.in()
|
||||||
|
g.p("ctrl *gomock.Controller")
|
||||||
|
g.p("recorder *%vMockRecorder", mockType)
|
||||||
|
g.out()
|
||||||
|
g.p("}")
|
||||||
|
g.p("")
|
||||||
|
|
||||||
|
g.p("// %vMockRecorder is the mock recorder for %v", mockType, mockType)
|
||||||
|
g.p("type %vMockRecorder struct {", mockType)
|
||||||
|
g.in()
|
||||||
|
g.p("mock *%v", mockType)
|
||||||
|
g.out()
|
||||||
|
g.p("}")
|
||||||
|
g.p("")
|
||||||
|
|
||||||
|
// TODO: Re-enable this if we can import the interface reliably.
|
||||||
|
//g.p("// Verify that the mock satisfies the interface at compile time.")
|
||||||
|
//g.p("var _ %v = (*%v)(nil)", typeName, mockType)
|
||||||
|
//g.p("")
|
||||||
|
|
||||||
|
g.p("// New%v creates a new mock instance", mockType)
|
||||||
|
g.p("func New%v(ctrl *gomock.Controller) *%v {", mockType, mockType)
|
||||||
|
g.in()
|
||||||
|
g.p("mock := &%v{ctrl: ctrl}", mockType)
|
||||||
|
g.p("mock.recorder = &%vMockRecorder{mock}", mockType)
|
||||||
|
g.p("return mock")
|
||||||
|
g.out()
|
||||||
|
g.p("}")
|
||||||
|
g.p("")
|
||||||
|
|
||||||
|
// XXX: possible name collision here if someone has EXPECT in their interface.
|
||||||
|
g.p("// EXPECT returns an object that allows the caller to indicate expected use")
|
||||||
|
g.p("func (m *%v) EXPECT() *%vMockRecorder {", mockType, mockType)
|
||||||
|
g.in()
|
||||||
|
g.p("return m.recorder")
|
||||||
|
g.out()
|
||||||
|
g.p("}")
|
||||||
|
|
||||||
|
g.GenerateMockMethods(mockType, intf, outputPackagePath)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *generator) GenerateMockMethods(mockType string, intf *model.Interface, pkgOverride string) {
|
||||||
|
for _, m := range intf.Methods {
|
||||||
|
g.p("")
|
||||||
|
g.GenerateMockMethod(mockType, m, pkgOverride)
|
||||||
|
g.p("")
|
||||||
|
g.GenerateMockRecorderMethod(mockType, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeArgString renders a Go parameter list from parallel name and type
// slices, writing each type only once for consecutive parameters that
// share it (e.g. "a, b int" rather than "a int, b int").
func makeArgString(argNames, argTypes []string) string {
	parts := make([]string, 0, len(argNames))
	for i := range argNames {
		part := argNames[i]
		// Keep the type unless the next argument has a different one
		// (or this is the last argument).
		if i+1 >= len(argTypes) || argTypes[i] != argTypes[i+1] {
			part += " " + argTypes[i]
		}
		parts = append(parts, part)
	}
	return strings.Join(parts, ", ")
}
|
||||||
|
|
||||||
|
// GenerateMockMethod generates a mock method implementation.
// If non-empty, pkgOverride is the package in which unqualified types reside.
// The generated method records the call on the controller and type-asserts
// the controller's returned values back to the declared result types.
// It always returns nil.
func (g *generator) GenerateMockMethod(mockType string, m *model.Method, pkgOverride string) error {
	argNames := g.getArgNames(m)
	argTypes := g.getArgTypes(m, pkgOverride)
	argString := makeArgString(argNames, argTypes)

	// Render the result list: parenthesized only when there are multiple
	// results, preceded by a space when non-empty.
	rets := make([]string, len(m.Out))
	for i, p := range m.Out {
		rets[i] = p.Type.String(g.packageMap, pkgOverride)
	}
	retString := strings.Join(rets, ", ")
	if len(rets) > 1 {
		retString = "(" + retString + ")"
	}
	if retString != "" {
		retString = " " + retString
	}

	// Local identifiers in the generated body must not collide with the
	// method's own argument names.
	ia := newIdentifierAllocator(argNames)
	idRecv := ia.allocateIdentifier("m")

	g.p("// %v mocks base method", m.Name)
	g.p("func (%v *%v) %v(%v)%v {", idRecv, mockType, m.Name, argString, retString)
	g.in()
	g.p("%s.ctrl.T.Helper()", idRecv)

	var callArgs string
	if m.Variadic == nil {
		if len(argNames) > 0 {
			callArgs = ", " + strings.Join(argNames, ", ")
		}
	} else {
		// Non-trivial. The generated code must build a []interface{},
		// but the variadic argument may be any type.
		idVarArgs := ia.allocateIdentifier("varargs")
		idVArg := ia.allocateIdentifier("a")
		g.p("%s := []interface{}{%s}", idVarArgs, strings.Join(argNames[:len(argNames)-1], ", "))
		g.p("for _, %s := range %s {", idVArg, argNames[len(argNames)-1])
		g.in()
		g.p("%s = append(%s, %s)", idVarArgs, idVarArgs, idVArg)
		g.out()
		g.p("}")
		callArgs = ", " + idVarArgs + "..."
	}
	if len(m.Out) == 0 {
		g.p(`%v.ctrl.Call(%v, %q%v)`, idRecv, idRecv, m.Name, callArgs)
	} else {
		idRet := ia.allocateIdentifier("ret")
		g.p(`%v := %v.ctrl.Call(%v, %q%v)`, idRet, idRecv, idRecv, m.Name, callArgs)

		// Go does not allow "naked" type assertions on nil values, so we use the two-value form here.
		// The value of that is either (x.(T), true) or (Z, false), where Z is the zero value for T.
		// Happily, this coincides with the semantics we want here.
		retNames := make([]string, len(rets))
		for i, t := range rets {
			retNames[i] = ia.allocateIdentifier(fmt.Sprintf("ret%d", i))
			g.p("%s, _ := %s[%d].(%s)", retNames[i], idRet, i, t)
		}
		g.p("return " + strings.Join(retNames, ", "))
	}

	g.out()
	g.p("}")
	return nil
}
|
||||||
|
|
||||||
|
// GenerateMockRecorderMethod generates the recorder-side method for m: a
// method on the *<mockType>MockRecorder type that takes every argument as
// interface{} (so matchers can be passed) and registers the expected call
// on the controller. It always returns nil.
func (g *generator) GenerateMockRecorderMethod(mockType string, m *model.Method) error {
	argNames := g.getArgNames(m)

	// Build the parameter list: all fixed arguments collapse to a single
	// "a, b interface{}" group; a variadic argument becomes "...interface{}".
	var argString string
	if m.Variadic == nil {
		argString = strings.Join(argNames, ", ")
	} else {
		argString = strings.Join(argNames[:len(argNames)-1], ", ")
	}
	if argString != "" {
		argString += " interface{}"
	}

	if m.Variadic != nil {
		if argString != "" {
			argString += ", "
		}
		argString += fmt.Sprintf("%s ...interface{}", argNames[len(argNames)-1])
	}

	// Local identifiers in the generated body must not collide with the
	// method's own argument names.
	ia := newIdentifierAllocator(argNames)
	idRecv := ia.allocateIdentifier("mr")

	g.p("// %v indicates an expected call of %v", m.Name, m.Name)
	g.p("func (%s *%vMockRecorder) %v(%v) *gomock.Call {", idRecv, mockType, m.Name, argString)
	g.in()
	g.p("%s.mock.ctrl.T.Helper()", idRecv)

	var callArgs string
	if m.Variadic == nil {
		if len(argNames) > 0 {
			callArgs = ", " + strings.Join(argNames, ", ")
		}
	} else {
		if len(argNames) == 1 {
			// Easy: just use ... to push the arguments through.
			callArgs = ", " + argNames[0] + "..."
		} else {
			// Hard: create a temporary slice.
			idVarArgs := ia.allocateIdentifier("varargs")
			g.p("%s := append([]interface{}{%s}, %s...)",
				idVarArgs,
				strings.Join(argNames[:len(argNames)-1], ", "),
				argNames[len(argNames)-1])
			callArgs = ", " + idVarArgs + "..."
		}
	}
	g.p(`return %s.mock.ctrl.RecordCallWithMethodType(%s.mock, "%s", reflect.TypeOf((*%s)(nil).%s)%s)`, idRecv, idRecv, m.Name, mockType, m.Name, callArgs)

	g.out()
	g.p("}")
	return nil
}
|
||||||
|
|
||||||
|
func (g *generator) getArgNames(m *model.Method) []string {
|
||||||
|
argNames := make([]string, len(m.In))
|
||||||
|
for i, p := range m.In {
|
||||||
|
name := p.Name
|
||||||
|
if name == "" {
|
||||||
|
name = fmt.Sprintf("arg%d", i)
|
||||||
|
}
|
||||||
|
argNames[i] = name
|
||||||
|
}
|
||||||
|
if m.Variadic != nil {
|
||||||
|
name := m.Variadic.Name
|
||||||
|
if name == "" {
|
||||||
|
name = fmt.Sprintf("arg%d", len(m.In))
|
||||||
|
}
|
||||||
|
argNames = append(argNames, name)
|
||||||
|
}
|
||||||
|
return argNames
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *generator) getArgTypes(m *model.Method, pkgOverride string) []string {
|
||||||
|
argTypes := make([]string, len(m.In))
|
||||||
|
for i, p := range m.In {
|
||||||
|
argTypes[i] = p.Type.String(g.packageMap, pkgOverride)
|
||||||
|
}
|
||||||
|
if m.Variadic != nil {
|
||||||
|
argTypes = append(argTypes, "..."+m.Variadic.Type.String(g.packageMap, pkgOverride))
|
||||||
|
}
|
||||||
|
return argTypes
|
||||||
|
}
|
||||||
|
|
||||||
|
// identifierAllocator tracks names already in use so that generated code
// can pick fresh, non-colliding identifiers.
type identifierAllocator map[string]struct{}

// newIdentifierAllocator returns an allocator with every name in taken
// already marked as used.
func newIdentifierAllocator(taken []string) identifierAllocator {
	used := make(identifierAllocator, len(taken))
	for _, name := range taken {
		used[name] = struct{}{}
	}
	return used
}

// allocateIdentifier returns want if it is free, otherwise the first free
// name of the form want_2, want_3, ... The returned name is recorded as used.
func (o identifierAllocator) allocateIdentifier(want string) string {
	for i, candidate := 2, want; ; i++ {
		if _, taken := o[candidate]; !taken {
			o[candidate] = struct{}{}
			return candidate
		}
		candidate = want + "_" + strconv.Itoa(i)
	}
}
|
||||||
|
|
||||||
|
// Output returns the generator's output, formatted in the standard Go style.
// A formatting failure means the generator itself emitted invalid Go, so it
// is treated as fatal; the raw buffer is included to make the bug diagnosable.
func (g *generator) Output() []byte {
	src, err := format.Source(g.buf.Bytes())
	if err != nil {
		log.Fatalf("Failed to format generated source code: %s\n%s", err, g.buf.String())
	}
	return src
}
|
|
@ -0,0 +1,461 @@
|
||||||
|
// Copyright 2012 Google Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package model contains the data model necessary for generating mock implementations.
|
||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/gob"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// pkgPath is the importable path for package model
const pkgPath = "github.com/golang/mock/mockgen/model"

// Package is a Go package. It may be a subset.
type Package struct {
	Name       string       // package name as written in the source
	Interfaces []*Interface // interfaces discovered in the package
	DotImports []string     // import paths that are dot-imported
}

// Print writes a human-readable summary of the package and its interfaces
// to w (debugging aid, not code generation).
func (pkg *Package) Print(w io.Writer) {
	fmt.Fprintf(w, "package %s\n", pkg.Name)
	for _, intf := range pkg.Interfaces {
		intf.Print(w)
	}
}

// Imports returns the imports needed by the Package as a set of import paths.
func (pkg *Package) Imports() map[string]bool {
	im := make(map[string]bool)
	for _, intf := range pkg.Interfaces {
		intf.addImports(im)
	}
	return im
}
|
||||||
|
|
||||||
|
// Interface is a Go interface.
type Interface struct {
	Name    string
	Methods []*Method
}

// Print writes a human-readable summary of the interface and its methods to w.
func (intf *Interface) Print(w io.Writer) {
	fmt.Fprintf(w, "interface %s\n", intf.Name)
	for _, m := range intf.Methods {
		m.Print(w)
	}
}

// addImports records the import paths needed by all of the interface's
// methods into im.
func (intf *Interface) addImports(im map[string]bool) {
	for _, m := range intf.Methods {
		m.addImports(im)
	}
}
|
||||||
|
|
||||||
|
// Method is a single method of an interface.
type Method struct {
	Name     string
	In, Out  []*Parameter
	Variadic *Parameter // may be nil
}

// Print writes a human-readable summary of the method's signature to w,
// listing inputs, the variadic parameter (if any), and outputs.
func (m *Method) Print(w io.Writer) {
	fmt.Fprintf(w, " - method %s\n", m.Name)
	if len(m.In) > 0 {
		fmt.Fprintf(w, " in:\n")
		for _, p := range m.In {
			p.Print(w)
		}
	}
	if m.Variadic != nil {
		fmt.Fprintf(w, " ...:\n")
		m.Variadic.Print(w)
	}
	if len(m.Out) > 0 {
		fmt.Fprintf(w, " out:\n")
		for _, p := range m.Out {
			p.Print(w)
		}
	}
}

// addImports records the import paths needed by the method's input,
// variadic, and output types into im.
func (m *Method) addImports(im map[string]bool) {
	for _, p := range m.In {
		p.Type.addImports(im)
	}
	if m.Variadic != nil {
		m.Variadic.Type.addImports(im)
	}
	for _, p := range m.Out {
		p.Type.addImports(im)
	}
}
|
||||||
|
|
||||||
|
// Parameter is an argument or return parameter of a method.
type Parameter struct {
	Name string // may be empty
	Type Type
}

// Print writes a human-readable "name: type" line to w; an empty name is
// rendered as "".
func (p *Parameter) Print(w io.Writer) {
	n := p.Name
	if n == "" {
		n = `""`
	}
	fmt.Fprintf(w, " - %v: %v\n", n, p.Type.String(nil, ""))
}
|
||||||
|
|
||||||
|
// Type is a Go type.
type Type interface {
	// String renders the type as Go source. pm maps an import path to the
	// local package name used to qualify it; types belonging to the
	// pkgOverride package are left unqualified.
	String(pm map[string]string, pkgOverride string) string
	// addImports records the import paths the type depends on into im.
	addImports(im map[string]bool)
}
|
||||||
|
|
||||||
|
// init registers every concrete Type implementation with encoding/gob so
// that model values can be gob-encoded and -decoded.
func init() {
	gob.Register(&ArrayType{})
	gob.Register(&ChanType{})
	gob.Register(&FuncType{})
	gob.Register(&MapType{})
	gob.Register(&NamedType{})
	gob.Register(&PointerType{})

	// Call gob.RegisterName to make sure it has the consistent name registered
	// for both gob decoder and encoder.
	//
	// For a non-pointer type, gob.Register will try to get package full path by
	// calling rt.PkgPath() for a name to register. If your project has vendor
	// directory, it is possible that PkgPath will get a path like this:
	//     ../../../vendor/github.com/golang/mock/mockgen/model
	gob.RegisterName(pkgPath+".PredeclaredType", PredeclaredType(""))
}
|
||||||
|
|
||||||
|
// ArrayType is an array or slice type.
|
||||||
|
type ArrayType struct {
|
||||||
|
Len int // -1 for slices, >= 0 for arrays
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (at *ArrayType) String(pm map[string]string, pkgOverride string) string {
|
||||||
|
s := "[]"
|
||||||
|
if at.Len > -1 {
|
||||||
|
s = fmt.Sprintf("[%d]", at.Len)
|
||||||
|
}
|
||||||
|
return s + at.Type.String(pm, pkgOverride)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (at *ArrayType) addImports(im map[string]bool) { at.Type.addImports(im) }
|
||||||
|
|
||||||
|
// ChanType is a channel type.
|
||||||
|
type ChanType struct {
|
||||||
|
Dir ChanDir // 0, 1 or 2
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ct *ChanType) String(pm map[string]string, pkgOverride string) string {
|
||||||
|
s := ct.Type.String(pm, pkgOverride)
|
||||||
|
if ct.Dir == RecvDir {
|
||||||
|
return "<-chan " + s
|
||||||
|
}
|
||||||
|
if ct.Dir == SendDir {
|
||||||
|
return "chan<- " + s
|
||||||
|
}
|
||||||
|
return "chan " + s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ct *ChanType) addImports(im map[string]bool) { ct.Type.addImports(im) }
|
||||||
|
|
||||||
|
// ChanDir is a channel direction.
|
||||||
|
type ChanDir int
|
||||||
|
|
||||||
|
const (
|
||||||
|
RecvDir ChanDir = 1
|
||||||
|
SendDir ChanDir = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
// FuncType is a function type.
type FuncType struct {
	In, Out  []*Parameter
	Variadic *Parameter // may be nil
}

// String renders the function type as Go source, e.g.
// "func(int, ...string) (bool, error)".
func (ft *FuncType) String(pm map[string]string, pkgOverride string) string {
	args := make([]string, len(ft.In))
	for i, p := range ft.In {
		args[i] = p.Type.String(pm, pkgOverride)
	}
	if ft.Variadic != nil {
		args = append(args, "..."+ft.Variadic.Type.String(pm, pkgOverride))
	}
	rets := make([]string, len(ft.Out))
	for i, p := range ft.Out {
		rets[i] = p.Type.String(pm, pkgOverride)
	}
	retString := strings.Join(rets, ", ")
	// A single result needs no parentheses; multiple results do. No result
	// means no trailing string at all.
	if nOut := len(ft.Out); nOut == 1 {
		retString = " " + retString
	} else if nOut > 1 {
		retString = " (" + retString + ")"
	}
	return "func(" + strings.Join(args, ", ") + ")" + retString
}

// addImports records the imports required by all parameter, variadic, and
// result types.
func (ft *FuncType) addImports(im map[string]bool) {
	for _, p := range ft.In {
		p.Type.addImports(im)
	}
	if ft.Variadic != nil {
		ft.Variadic.Type.addImports(im)
	}
	for _, p := range ft.Out {
		p.Type.addImports(im)
	}
}
|
||||||
|
|
||||||
|
// MapType is a map type.
|
||||||
|
type MapType struct {
|
||||||
|
Key, Value Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mt *MapType) String(pm map[string]string, pkgOverride string) string {
|
||||||
|
return "map[" + mt.Key.String(pm, pkgOverride) + "]" + mt.Value.String(pm, pkgOverride)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mt *MapType) addImports(im map[string]bool) {
|
||||||
|
mt.Key.addImports(im)
|
||||||
|
mt.Value.addImports(im)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NamedType is an exported type in a package.
type NamedType struct {
	Package string // import path of the defining package; may be empty
	Type    string // TODO: should this be typed Type?
}

// String renders the type as Go source. Types from the pkgOverride package
// are left unqualified; otherwise the local package name looked up in pm
// (import path -> local name) is used as a qualifier when available.
func (nt *NamedType) String(pm map[string]string, pkgOverride string) string {
	// TODO: is this right?
	if pkgOverride == nt.Package {
		return nt.Type
	}
	if prefix := pm[nt.Package]; prefix != "" {
		return prefix + "." + nt.Type
	}
	// No known local name for the package: emit the bare type name.
	return nt.Type
}

// addImports records the defining package (if any) as a required import.
func (nt *NamedType) addImports(im map[string]bool) {
	if nt.Package != "" {
		im[nt.Package] = true
	}
}
|
||||||
|
|
||||||
|
// PointerType is a pointer to another type.
|
||||||
|
type PointerType struct {
|
||||||
|
Type Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pt *PointerType) String(pm map[string]string, pkgOverride string) string {
|
||||||
|
return "*" + pt.Type.String(pm, pkgOverride)
|
||||||
|
}
|
||||||
|
func (pt *PointerType) addImports(im map[string]bool) { pt.Type.addImports(im) }
|
||||||
|
|
||||||
|
// PredeclaredType is a predeclared type such as "int".
type PredeclaredType string

// String returns the type's name; predeclared types never need a package
// qualifier, so pm and pkgOverride are ignored.
func (pt PredeclaredType) String(pm map[string]string, pkgOverride string) string {
	return string(pt)
}

// addImports is a no-op: predeclared types require no imports.
func (pt PredeclaredType) addImports(im map[string]bool) {}
|
||||||
|
|
||||||
|
// The following code is intended to be called by the program generated by ../reflect.go.

// InterfaceFromInterfaceType builds a model Interface from a reflect.Type
// whose Kind is reflect.Interface, converting every method's inputs,
// variadic parameter, and outputs. The returned Interface's Name is left
// empty. It returns an error if it is not an interface type or if any
// method signature cannot be modeled.
func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) {
	if it.Kind() != reflect.Interface {
		return nil, fmt.Errorf("%v is not an interface", it)
	}
	intf := &Interface{}

	for i := 0; i < it.NumMethod(); i++ {
		mt := it.Method(i)
		// TODO: need to skip unexported methods? or just raise an error?
		m := &Method{
			Name: mt.Name,
		}

		var err error
		m.In, m.Variadic, m.Out, err = funcArgsFromType(mt.Type)
		if err != nil {
			return nil, err
		}

		intf.Methods = append(intf.Methods, m)
	}

	return intf, nil
}
|
||||||
|
|
||||||
|
// t's Kind must be a reflect.Func.
// funcArgsFromType converts a reflect function type into model parameters.
// When t is variadic, its last input is split out into variadic (with the
// slice's element type) rather than being appended to in.
func funcArgsFromType(t reflect.Type) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) {
	// Exclude the variadic slice from the fixed-input count; it is handled
	// separately below.
	nin := t.NumIn()
	if t.IsVariadic() {
		nin--
	}
	var p *Parameter
	for i := 0; i < nin; i++ {
		p, err = parameterFromType(t.In(i))
		if err != nil {
			return
		}
		in = append(in, p)
	}
	if t.IsVariadic() {
		// Model the variadic parameter by its element type, not []T.
		p, err = parameterFromType(t.In(nin).Elem())
		if err != nil {
			return
		}
		variadic = p
	}
	for i := 0; i < t.NumOut(); i++ {
		p, err = parameterFromType(t.Out(i))
		if err != nil {
			return
		}
		out = append(out, p)
	}
	return
}
|
||||||
|
|
||||||
|
func parameterFromType(t reflect.Type) (*Parameter, error) {
|
||||||
|
tt, err := typeFromType(t)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Parameter{Type: tt}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// errorType is the reflect.Type of the built-in error interface.
var errorType = reflect.TypeOf((*error)(nil)).Elem()

// byteType is the reflect.Type of byte, used by typeFromType to special-case
// byte (see the workaround for https://golang.org/issue/3853 there).
var byteType = reflect.TypeOf(byte(0))
|
||||||
|
|
||||||
|
// typeFromType converts a reflect.Type into the corresponding model Type.
// Named types become NamedType; predeclared and unnamed composite types map
// to their dedicated model types. Unsupported kinds (e.g. non-empty structs,
// unsafe pointers) produce an error.
func typeFromType(t reflect.Type) (Type, error) {
	// Hack workaround for https://golang.org/issue/3853.
	// This explicit check should not be necessary.
	if t == byteType {
		return PredeclaredType("byte"), nil
	}

	// A non-empty PkgPath means a named (package-scoped) type.
	if imp := t.PkgPath(); imp != "" {
		return &NamedType{
			Package: impPath(imp),
			Type:    t.Name(),
		}, nil
	}

	// only unnamed or predeclared types after here

	// Lots of types have element types. Let's do the parsing and error checking for all of them.
	var elemType Type
	switch t.Kind() {
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
		var err error
		elemType, err = typeFromType(t.Elem())
		if err != nil {
			return nil, err
		}
	}

	switch t.Kind() {
	case reflect.Array:
		return &ArrayType{
			Len:  t.Len(),
			Type: elemType,
		}, nil
	case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String:
		// The kind name ("int", "string", ...) doubles as the type name.
		return PredeclaredType(t.Kind().String()), nil
	case reflect.Chan:
		var dir ChanDir
		switch t.ChanDir() {
		case reflect.RecvDir:
			dir = RecvDir
		case reflect.SendDir:
			dir = SendDir
		}
		return &ChanType{
			Dir:  dir,
			Type: elemType,
		}, nil
	case reflect.Func:
		in, variadic, out, err := funcArgsFromType(t)
		if err != nil {
			return nil, err
		}
		return &FuncType{
			In:       in,
			Out:      out,
			Variadic: variadic,
		}, nil
	case reflect.Interface:
		// Two special interfaces.
		if t.NumMethod() == 0 {
			return PredeclaredType("interface{}"), nil
		}
		if t == errorType {
			return PredeclaredType("error"), nil
		}
		// Other interfaces fall through to the error below.
	case reflect.Map:
		kt, err := typeFromType(t.Key())
		if err != nil {
			return nil, err
		}
		return &MapType{
			Key:   kt,
			Value: elemType,
		}, nil
	case reflect.Ptr:
		return &PointerType{
			Type: elemType,
		}, nil
	case reflect.Slice:
		return &ArrayType{
			Len:  -1, // slices are arrays with Len == -1
			Type: elemType,
		}, nil
	case reflect.Struct:
		if t.NumField() == 0 {
			return PredeclaredType("struct{}"), nil
		}
		// Non-empty structs fall through to the error below.
	}

	// TODO: Struct, UnsafePointer
	return nil, fmt.Errorf("can't yet turn %v (%v) into a model.Type", t, t.Kind())
}
|
||||||
|
|
||||||
|
// impPath sanitizes the package path returned by `PkgPath` method of a
// reflect Type so that it is importable. PkgPath might return a path that
// includes "vendor". These paths do not compile, so everything up to and
// including the last "/vendor/" is removed.
// See https://github.com/golang/go/issues/12019.
func impPath(imp string) string {
	const marker = "/vendor/"
	sanitized := imp
	// Normalize a leading "vendor/" so the single "/vendor/" search below
	// covers both the rooted and nested cases.
	if strings.HasPrefix(sanitized, "vendor/") {
		sanitized = "/" + sanitized
	}
	if i := strings.LastIndex(sanitized, marker); i >= 0 {
		sanitized = sanitized[i+len(marker):]
	}
	return sanitized
}
|
|
@ -0,0 +1,523 @@
|
||||||
|
// Copyright 2012 Google Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
// This file contains the model construction by parsing source files.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/build"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"log"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/golang/mock/mockgen/model"
|
||||||
|
"golang.org/x/tools/go/packages"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Command-line flags consumed by source mode.
var (
	imports  = flag.String("imports", "", "(source mode) Comma-separated name=path pairs of explicit imports to use.")
	auxFiles = flag.String("aux_files", "", "(source mode) Comma-separated pkg=path pairs of auxiliary Go source files.")
)
|
||||||
|
|
||||||
|
// TODO: simplify error reporting

// parseFile builds a model.Package from the single Go source file at
// source. It loads the file's package (to learn its import path), applies
// the -imports and -aux_files flags, and delegates interface parsing to a
// fileParser.
func parseFile(source string) (*model.Package, error) {
	srcDir, err := filepath.Abs(filepath.Dir(source))
	if err != nil {
		return nil, fmt.Errorf("failed getting source directory: %v", err)
	}

	cfg := &packages.Config{Mode: packages.LoadSyntax, Tests: true}
	pkgs, err := packages.Load(cfg, "file="+source)
	if err != nil {
		return nil, err
	}
	if packages.PrintErrors(pkgs) > 0 || len(pkgs) == 0 {
		return nil, errors.New("loading package failed")
	}

	packageImport := pkgs[0].PkgPath

	// It is illegal to import a _test package.
	packageImport = strings.TrimSuffix(packageImport, "_test")

	fs := token.NewFileSet()
	file, err := parser.ParseFile(fs, source, nil, 0)
	if err != nil {
		return nil, fmt.Errorf("failed parsing source file %v: %v", source, err)
	}

	p := &fileParser{
		fileSet:            fs,
		imports:            make(map[string]string),
		importedInterfaces: make(map[string]map[string]*ast.InterfaceType),
		auxInterfaces:      make(map[string]map[string]*ast.InterfaceType),
		srcDir:             srcDir,
	}

	// Handle -imports.
	dotImports := make(map[string]bool)
	if *imports != "" {
		for _, kv := range strings.Split(*imports, ",") {
			// NOTE(review): a kv with no '=' makes eq == -1 and the slice
			// below panics; consider validating like parseAuxFiles does.
			eq := strings.Index(kv, "=")
			k, v := kv[:eq], kv[eq+1:]
			if k == "." {
				// TODO: Catch dupes?
				dotImports[v] = true
			} else {
				// TODO: Catch dupes?
				p.imports[k] = v
			}
		}
	}

	// Handle -aux_files.
	if err := p.parseAuxFiles(*auxFiles); err != nil {
		return nil, err
	}
	p.addAuxInterfacesFromFile(packageImport, file) // this file

	pkg, err := p.parseFile(packageImport, file)
	if err != nil {
		return nil, err
	}
	for path := range dotImports {
		pkg.DotImports = append(pkg.DotImports, path)
	}
	return pkg, nil
}
|
||||||
|
|
||||||
|
// fileParser holds the state accumulated while parsing a source file and
// its imported/auxiliary packages into a model.Package.
type fileParser struct {
	fileSet *token.FileSet
	imports map[string]string                        // package name => import path
	importedInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface

	auxFiles      []*ast.File
	auxInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface

	srcDir string // absolute directory of the file being parsed; used to resolve imports
}
|
||||||
|
|
||||||
|
func (p *fileParser) errorf(pos token.Pos, format string, args ...interface{}) error {
|
||||||
|
ps := p.fileSet.Position(pos)
|
||||||
|
format = "%s:%d:%d: " + format
|
||||||
|
args = append([]interface{}{ps.Filename, ps.Line, ps.Column}, args...)
|
||||||
|
return fmt.Errorf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseAuxFiles parses the -aux_files flag value: a comma-separated list of
// pkg=path pairs. Each referenced file is parsed and its interfaces are
// registered under pkg for embedded-interface resolution.
func (p *fileParser) parseAuxFiles(auxFiles string) error {
	auxFiles = strings.TrimSpace(auxFiles)
	if auxFiles == "" {
		return nil
	}
	for _, kv := range strings.Split(auxFiles, ",") {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) != 2 {
			return fmt.Errorf("bad aux file spec: %v", kv)
		}
		pkg, fpath := parts[0], parts[1]

		file, err := parser.ParseFile(p.fileSet, fpath, nil, 0)
		if err != nil {
			return err
		}
		p.auxFiles = append(p.auxFiles, file)
		p.addAuxInterfacesFromFile(pkg, file)
	}
	return nil
}
|
||||||
|
|
||||||
|
func (p *fileParser) addAuxInterfacesFromFile(pkg string, file *ast.File) {
|
||||||
|
if _, ok := p.auxInterfaces[pkg]; !ok {
|
||||||
|
p.auxInterfaces[pkg] = make(map[string]*ast.InterfaceType)
|
||||||
|
}
|
||||||
|
for ni := range iterInterfaces(file) {
|
||||||
|
p.auxInterfaces[pkg][ni.name.Name] = ni.it
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseFile loads all file imports and auxiliary files import into the
// fileParser, parses all file interfaces and returns package model.
func (p *fileParser) parseFile(importPath string, file *ast.File) (*model.Package, error) {
	allImports, dotImports := importsOfFile(file)
	// Don't stomp imports provided by -imports. Those should take precedence.
	for pkg, path := range allImports {
		if _, ok := p.imports[pkg]; !ok {
			p.imports[pkg] = path
		}
	}
	// Add imports from auxiliary files, which might be needed for embedded interfaces.
	// Don't stomp any other imports.
	for _, f := range p.auxFiles {
		auxImports, _ := importsOfFile(f)
		for pkg, path := range auxImports {
			if _, ok := p.imports[pkg]; !ok {
				p.imports[pkg] = path
			}
		}
	}

	// Parse every interface declared in the file itself.
	var is []*model.Interface
	for ni := range iterInterfaces(file) {
		i, err := p.parseInterface(ni.name.String(), importPath, ni.it)
		if err != nil {
			return nil, err
		}
		is = append(is, i)
	}
	return &model.Package{
		Name:       file.Name.String(),
		Interfaces: is,
		DotImports: dotImports,
	}, nil
}
|
||||||
|
|
||||||
|
// parsePackage loads package specified by path, parses it and populates
// corresponding imports and importedInterfaces into the fileParser.
func (p *fileParser) parsePackage(path string) error {
	var pkgs map[string]*ast.Package
	if imp, err := build.Import(path, p.srcDir, build.FindOnly); err != nil {
		return err
	} else if pkgs, err = parser.ParseDir(p.fileSet, imp.Dir, nil, 0); err != nil {
		return err
	}
	for _, pkg := range pkgs {
		// Collapse the package into a single synthetic file so interfaces
		// and imports can be collected in one pass.
		file := ast.MergePackageFiles(pkg, ast.FilterFuncDuplicates|ast.FilterUnassociatedComments|ast.FilterImportDuplicates)
		if _, ok := p.importedInterfaces[path]; !ok {
			p.importedInterfaces[path] = make(map[string]*ast.InterfaceType)
		}
		for ni := range iterInterfaces(file) {
			p.importedInterfaces[path][ni.name.Name] = ni.it
		}
		// Record the package's imports without stomping existing entries.
		imports, _ := importsOfFile(file)
		for pkgName, pkgPath := range imports {
			if _, ok := p.imports[pkgName]; !ok {
				p.imports[pkgName] = pkgPath
			}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// parseInterface converts the AST interface type it (declared under name in
// package pkg) into a model.Interface, resolving embedded interfaces from
// this package and from imported packages (parsing those lazily as needed).
func (p *fileParser) parseInterface(name, pkg string, it *ast.InterfaceType) (*model.Interface, error) {
	intf := &model.Interface{Name: name}
	for _, field := range it.Methods.List {
		switch v := field.Type.(type) {
		case *ast.FuncType:
			// An ordinary method declaration.
			if nn := len(field.Names); nn != 1 {
				return nil, fmt.Errorf("expected one name for interface %v, got %d", intf.Name, nn)
			}
			m := &model.Method{
				Name: field.Names[0].String(),
			}
			var err error
			m.In, m.Variadic, m.Out, err = p.parseFunc(pkg, v)
			if err != nil {
				return nil, err
			}
			intf.Methods = append(intf.Methods, m)
		case *ast.Ident:
			// Embedded interface in this package.
			ei := p.auxInterfaces[pkg][v.String()]
			if ei == nil {
				if ei = p.importedInterfaces[pkg][v.String()]; ei == nil {
					return nil, p.errorf(v.Pos(), "unknown embedded interface %s", v.String())
				}
			}
			eintf, err := p.parseInterface(v.String(), pkg, ei)
			if err != nil {
				return nil, err
			}
			// Copy the methods.
			// TODO: apply shadowing rules.
			for _, m := range eintf.Methods {
				intf.Methods = append(intf.Methods, m)
			}
		case *ast.SelectorExpr:
			// Embedded interface in another package.
			fpkg, sel := v.X.(*ast.Ident).String(), v.Sel.String()
			epkg, ok := p.imports[fpkg]
			if !ok {
				return nil, p.errorf(v.X.Pos(), "unknown package %s", fpkg)
			}
			ei := p.auxInterfaces[fpkg][sel]
			if ei == nil {
				fpkg = epkg
				// Parse the other package lazily on first reference.
				if _, ok = p.importedInterfaces[epkg]; !ok {
					if err := p.parsePackage(epkg); err != nil {
						return nil, p.errorf(v.Pos(), "could not parse package %s: %v", fpkg, err)
					}
				}
				if ei = p.importedInterfaces[epkg][sel]; ei == nil {
					return nil, p.errorf(v.Pos(), "unknown embedded interface %s.%s", fpkg, sel)
				}
			}
			eintf, err := p.parseInterface(sel, fpkg, ei)
			if err != nil {
				return nil, err
			}
			// Copy the methods.
			// TODO: apply shadowing rules.
			for _, m := range eintf.Methods {
				intf.Methods = append(intf.Methods, m)
			}
		default:
			return nil, fmt.Errorf("don't know how to mock method of type %T", field.Type)
		}
	}
	return intf, nil
}
|
||||||
|
|
||||||
|
// parseFunc translates the AST function type f into mockgen's model
// representation. variadic is non-nil only when f's final parameter is an
// ellipsis; that parameter is excluded from in.
func (p *fileParser) parseFunc(pkg string, f *ast.FuncType) (in []*model.Parameter, variadic *model.Parameter, out []*model.Parameter, err error) {
	if f.Params != nil {
		regParams := f.Params.List
		if isVariadic(f) {
			// Split the trailing "...T" parameter off so it can be recorded
			// separately from the regular parameters.
			n := len(regParams)
			varParams := regParams[n-1:]
			regParams = regParams[:n-1]
			vp, err := p.parseFieldList(pkg, varParams)
			if err != nil {
				return nil, nil, nil, p.errorf(varParams[0].Pos(), "failed parsing variadic argument: %v", err)
			}
			variadic = vp[0]
		}
		in, err = p.parseFieldList(pkg, regParams)
		if err != nil {
			return nil, nil, nil, p.errorf(f.Pos(), "failed parsing arguments: %v", err)
		}
	}
	if f.Results != nil {
		out, err = p.parseFieldList(pkg, f.Results.List)
		if err != nil {
			return nil, nil, nil, p.errorf(f.Pos(), "failed parsing returns: %v", err)
		}
	}
	return
}
|
||||||
|
|
||||||
|
// parseFieldList converts a parameter/result field list into model
// parameters. A field with k names yields k parameters sharing one type;
// an unnamed field yields a single anonymous parameter. An empty list
// yields nil.
func (p *fileParser) parseFieldList(pkg string, fields []*ast.Field) ([]*model.Parameter, error) {
	// First pass: count how many parameters the list expands to.
	nf := 0
	for _, f := range fields {
		nn := len(f.Names)
		if nn == 0 {
			nn = 1 // anonymous parameter
		}
		nf += nn
	}
	if nf == 0 {
		return nil, nil
	}
	ps := make([]*model.Parameter, nf)
	i := 0 // destination index
	for _, f := range fields {
		t, err := p.parseType(pkg, f.Type)
		if err != nil {
			return nil, err
		}

		if len(f.Names) == 0 {
			// anonymous arg
			ps[i] = &model.Parameter{Type: t}
			i++
			continue
		}
		// Named args: one parameter per name, all sharing the field's type.
		for _, name := range f.Names {
			ps[i] = &model.Parameter{Name: name.Name, Type: t}
			i++
		}
	}
	return ps, nil
}
|
||||||
|
|
||||||
|
// parseType translates an AST type expression (interpreted relative to
// package pkg) into mockgen's model.Type representation. It recurses into
// composite types and returns an error for type forms it cannot mock.
func (p *fileParser) parseType(pkg string, typ ast.Expr) (model.Type, error) {
	switch v := typ.(type) {
	case *ast.ArrayType:
		// Len == nil means a slice; a fixed size is recorded as Len >= 0.
		ln := -1
		if v.Len != nil {
			x, err := strconv.Atoi(v.Len.(*ast.BasicLit).Value)
			if err != nil {
				return nil, p.errorf(v.Len.Pos(), "bad array size: %v", err)
			}
			ln = x
		}
		t, err := p.parseType(pkg, v.Elt)
		if err != nil {
			return nil, err
		}
		return &model.ArrayType{Len: ln, Type: t}, nil
	case *ast.ChanType:
		t, err := p.parseType(pkg, v.Value)
		if err != nil {
			return nil, err
		}
		// Zero value of model.ChanDir represents a bidirectional channel.
		var dir model.ChanDir
		if v.Dir == ast.SEND {
			dir = model.SendDir
		}
		if v.Dir == ast.RECV {
			dir = model.RecvDir
		}
		return &model.ChanType{Dir: dir, Type: t}, nil
	case *ast.Ellipsis:
		// assume we're parsing a variadic argument
		return p.parseType(pkg, v.Elt)
	case *ast.FuncType:
		in, variadic, out, err := p.parseFunc(pkg, v)
		if err != nil {
			return nil, err
		}
		return &model.FuncType{In: in, Out: out, Variadic: variadic}, nil
	case *ast.Ident:
		if v.IsExported() {
			// `pkg` may be an aliased imported pkg
			// if so, patch the import w/ the fully qualified import
			maybeImportedPkg, ok := p.imports[pkg]
			if ok {
				pkg = maybeImportedPkg
			}
			// assume type in this package
			return &model.NamedType{Package: pkg, Type: v.Name}, nil
		}

		// assume predeclared type
		return model.PredeclaredType(v.Name), nil
	case *ast.InterfaceType:
		if v.Methods != nil && len(v.Methods.List) > 0 {
			return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed interface types")
		}
		return model.PredeclaredType("interface{}"), nil
	case *ast.MapType:
		key, err := p.parseType(pkg, v.Key)
		if err != nil {
			return nil, err
		}
		value, err := p.parseType(pkg, v.Value)
		if err != nil {
			return nil, err
		}
		return &model.MapType{Key: key, Value: value}, nil
	case *ast.SelectorExpr:
		// Qualified type from another package, e.g. io.Reader.
		pkgName := v.X.(*ast.Ident).String()
		pkg, ok := p.imports[pkgName]
		if !ok {
			return nil, p.errorf(v.Pos(), "unknown package %q", pkgName)
		}
		return &model.NamedType{Package: pkg, Type: v.Sel.String()}, nil
	case *ast.StarExpr:
		t, err := p.parseType(pkg, v.X)
		if err != nil {
			return nil, err
		}
		return &model.PointerType{Type: t}, nil
	case *ast.StructType:
		if v.Fields != nil && len(v.Fields.List) > 0 {
			return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed struct types")
		}
		return model.PredeclaredType("struct{}"), nil
	}

	return nil, fmt.Errorf("don't know how to parse type %T", typ)
}
|
||||||
|
|
||||||
|
// importsOfFile returns a map of package name to import path
// of the imports in file. Dot imports are returned separately, blank
// imports ("_") are skipped, and a duplicate package name aborts the
// program via log.Fatalf.
func importsOfFile(file *ast.File) (normalImports map[string]string, dotImports []string) {
	normalImports = make(map[string]string)
	dotImports = make([]string, 0)
	for _, is := range file.Imports {
		var pkgName string
		importPath := is.Path.Value[1 : len(is.Path.Value)-1] // remove quotes

		if is.Name != nil {
			// Named imports are always certain.
			if is.Name.Name == "_" {
				continue
			}
			pkgName = is.Name.Name
		} else {
			// Resolve the real package name by loading the package.
			pkg, err := build.Import(importPath, "", 0)
			if err != nil {
				// Fallback to import path suffix. Note that this is uncertain.
				_, last := path.Split(importPath)
				// If the last path component has dots, the first dot-delimited
				// field is used as the name.
				pkgName = strings.SplitN(last, ".", 2)[0]
			} else {
				pkgName = pkg.Name
			}
		}

		if pkgName == "." {
			dotImports = append(dotImports, importPath)
		} else {

			if _, ok := normalImports[pkgName]; ok {
				log.Fatalf("imported package collision: %q imported twice", pkgName)
			}
			normalImports[pkgName] = importPath
		}
	}
	return
}
|
||||||
|
|
||||||
|
// namedInterface pairs an interface type with the identifier it was
// declared under.
type namedInterface struct {
	name *ast.Ident         // declared name of the interface
	it   *ast.InterfaceType // the interface type itself
}
|
||||||
|
|
||||||
|
// Create an iterator over all interfaces in file.
// The channel is closed once every interface has been sent.
// NOTE(review): the producing goroutine blocks on an unbuffered channel,
// so callers must drain the channel fully or the goroutine leaks — confirm
// all call sites range to completion.
func iterInterfaces(file *ast.File) <-chan namedInterface {
	ch := make(chan namedInterface)
	go func() {
		for _, decl := range file.Decls {
			// Only type declarations can contain interfaces.
			gd, ok := decl.(*ast.GenDecl)
			if !ok || gd.Tok != token.TYPE {
				continue
			}
			for _, spec := range gd.Specs {
				ts, ok := spec.(*ast.TypeSpec)
				if !ok {
					continue
				}
				it, ok := ts.Type.(*ast.InterfaceType)
				if !ok {
					continue
				}

				ch <- namedInterface{ts.Name, it}
			}
		}
		close(ch)
	}()
	return ch
}
|
||||||
|
|
||||||
|
// isVariadic returns whether the function is variadic.
|
||||||
|
func isVariadic(f *ast.FuncType) bool {
|
||||||
|
nargs := len(f.Params.List)
|
||||||
|
if nargs == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
_, ok := f.Params.List[nargs-1].Type.(*ast.Ellipsis)
|
||||||
|
return ok
|
||||||
|
}
|
|
@ -0,0 +1,243 @@
|
||||||
|
// Copyright 2012 Google Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
// This file contains the model construction by reflection.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/gob"
|
||||||
|
"flag"
|
||||||
|
"go/build"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/golang/mock/mockgen/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Flags controlling reflect mode.
var (
	progOnly   = flag.Bool("prog_only", false, "(reflect mode) Only generate the reflection program; write it to stdout and exit.")
	execOnly   = flag.String("exec_only", "", "(reflect mode) If set, execute this reflection program.")
	buildFlags = flag.String("build_flags", "", "(reflect mode) Additional flags for go build.")
)
|
||||||
|
|
||||||
|
func writeProgram(importPath string, symbols []string) ([]byte, error) {
|
||||||
|
var program bytes.Buffer
|
||||||
|
data := reflectData{
|
||||||
|
ImportPath: importPath,
|
||||||
|
Symbols: symbols,
|
||||||
|
}
|
||||||
|
if err := reflectProgram.Execute(&program, &data); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return program.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// run the given program and parse the output as a model.Package.
|
||||||
|
func run(program string) (*model.Package, error) {
|
||||||
|
f, err := ioutil.TempFile("", "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
filename := f.Name()
|
||||||
|
defer os.Remove(filename)
|
||||||
|
if err := f.Close(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run the program.
|
||||||
|
cmd := exec.Command(program, "-output", filename)
|
||||||
|
cmd.Stdout = os.Stdout
|
||||||
|
cmd.Stderr = os.Stderr
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err = os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process output.
|
||||||
|
var pkg model.Package
|
||||||
|
if err := gob.NewDecoder(f).Decode(&pkg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := f.Close(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &pkg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// runInDir writes the given program into the given dir, runs it there, and
// parses the output as a model.Package. The temporary build directory is
// removed before returning.
func runInDir(program []byte, dir string) (*model.Package, error) {
	// We use TempDir instead of TempFile so we can control the filename.
	tmpDir, err := ioutil.TempDir(dir, "gomock_reflect_")
	if err != nil {
		return nil, err
	}
	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			log.Printf("failed to remove temp directory: %s", err)
		}
	}()
	const progSource = "prog.go"
	var progBinary = "prog.bin"
	if runtime.GOOS == "windows" {
		// Windows won't execute a program unless it has a ".exe" suffix.
		progBinary += ".exe"
	}

	if err := ioutil.WriteFile(filepath.Join(tmpDir, progSource), program, 0600); err != nil {
		return nil, err
	}

	cmdArgs := []string{}
	cmdArgs = append(cmdArgs, "build")
	// NOTE(review): *buildFlags is appended as a single argument, so several
	// space-separated build flags will not work — confirm whether intended.
	if *buildFlags != "" {
		cmdArgs = append(cmdArgs, *buildFlags)
	}
	cmdArgs = append(cmdArgs, "-o", progBinary, progSource)

	// Build the program.
	cmd := exec.Command("go", cmdArgs...)
	cmd.Dir = tmpDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return nil, err
	}
	return run(filepath.Join(tmpDir, progBinary))
}
|
||||||
|
|
||||||
|
// reflect generates the reflection program for importPath/symbols, builds
// and runs it, and returns the resulting model.Package. With -exec_only it
// runs a pre-built program instead; with -prog_only it prints the program
// source to stdout and exits the process.
func reflect(importPath string, symbols []string) (*model.Package, error) {
	// TODO: sanity check arguments

	if *execOnly != "" {
		return run(*execOnly)
	}

	program, err := writeProgram(importPath, symbols)
	if err != nil {
		return nil, err
	}

	if *progOnly {
		os.Stdout.Write(program)
		os.Exit(0)
	}

	wd, _ := os.Getwd()

	// Try to run the program in the same directory as the input package.
	if p, err := build.Import(importPath, wd, build.FindOnly); err == nil {
		dir := p.Dir
		if p, err := runInDir(program, dir); err == nil {
			return p, nil
		}
	}

	// Since that didn't work, try to run it in the current working directory.
	if p, err := runInDir(program, wd); err == nil {
		return p, nil
	}
	// Since that didn't work, try to run it in a standard temp directory.
	return runInDir(program, "")
}
|
||||||
|
|
||||||
|
// reflectData is the template payload for reflectProgram.
type reflectData struct {
	ImportPath string   // package to reflect over
	Symbols    []string // interface names to build models for
}
|
||||||
|
|
||||||
|
// This program reflects on an interface value, and prints the
// gob encoding of a model.Package to standard output.
// JSON doesn't work because of the model.Type interface.
// The rendered source imports the target package under the alias pkg_ and
// is built and executed by runInDir.
var reflectProgram = template.Must(template.New("program").Parse(`
package main

import (
	"encoding/gob"
	"flag"
	"fmt"
	"os"
	"path"
	"reflect"

	"github.com/golang/mock/mockgen/model"

	pkg_ {{printf "%q" .ImportPath}}
)

var output = flag.String("output", "", "The output file name, or empty to use stdout.")

func main() {
	flag.Parse()

	its := []struct{
		sym string
		typ reflect.Type
	}{
		{{range .Symbols}}
		{ {{printf "%q" .}}, reflect.TypeOf((*pkg_.{{.}})(nil)).Elem()},
		{{end}}
	}
	pkg := &model.Package{
		// NOTE: This behaves contrary to documented behaviour if the
		// package name is not the final component of the import path.
		// The reflect package doesn't expose the package name, though.
		Name: path.Base({{printf "%q" .ImportPath}}),
	}

	for _, it := range its {
		intf, err := model.InterfaceFromInterfaceType(it.typ)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Reflection: %v\n", err)
			os.Exit(1)
		}
		intf.Name = it.sym
		pkg.Interfaces = append(pkg.Interfaces, intf)
	}

	outfile := os.Stdout
	if len(*output) != 0 {
		var err error
		outfile, err = os.Create(*output)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to open output file %q", *output)
		}
		defer func() {
			if err := outfile.Close(); err != nil {
				fmt.Fprintf(os.Stderr, "failed to close output file %q", *output)
				os.Exit(1)
			}
		}()
	}

	if err := gob.NewEncoder(outfile).Encode(pkg); err != nil {
		fmt.Fprintf(os.Stderr, "gob encode: %v\n", err)
		os.Exit(1)
	}
}
`))
|
|
@ -0,0 +1,3 @@
|
||||||
|
# This source code refers to The Go Authors for copyright purposes.
|
||||||
|
# The master list of authors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/AUTHORS.
|
|
@ -0,0 +1,3 @@
|
||||||
|
# This source code was written by the Go contributors.
|
||||||
|
# The master list of contributors is in the main Go distribution,
|
||||||
|
# visible at http://tip.golang.org/CONTRIBUTORS.
|
|
@ -0,0 +1,27 @@
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,22 @@
|
||||||
|
Additional IP Rights Grant (Patents)
|
||||||
|
|
||||||
|
"This implementation" means the copyrightable works distributed by
|
||||||
|
Google as part of the Go project.
|
||||||
|
|
||||||
|
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||||
|
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||||
|
patent license to make, have made, use, offer to sell, sell, import,
|
||||||
|
transfer and otherwise run, modify and propagate the contents of this
|
||||||
|
implementation of Go, where such license applies only to those patent
|
||||||
|
claims, both currently owned or controlled by Google and acquired in
|
||||||
|
the future, licensable by Google that are necessarily infringed by this
|
||||||
|
implementation of Go. This grant does not include claims that would be
|
||||||
|
infringed only as a consequence of further modification of this
|
||||||
|
implementation. If you or your agent or exclusive licensee institute or
|
||||||
|
order or agree to the institution of patent litigation against any
|
||||||
|
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||||
|
that this implementation of Go or any code incorporated within this
|
||||||
|
implementation of Go constitutes direct or contributory patent
|
||||||
|
infringement, or inducement of patent infringement, then any patent
|
||||||
|
rights granted to you under this License for this implementation of Go
|
||||||
|
shall terminate as of the date such litigation is filed.
|
|
@ -0,0 +1,2 @@
|
||||||
|
NOTE: For Go releases 1.5 and later, this tool lives in the
|
||||||
|
standard repository. The code here is not maintained.
|
|
@ -0,0 +1,722 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/parser"
|
||||||
|
"go/printer"
|
||||||
|
"go/token"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// usageMessage is printed by usage before the flag listing.
const usageMessage = "" +
	`Usage of 'go tool cover':
Given a coverage profile produced by 'go test':
	go test -coverprofile=c.out

Open a web browser displaying annotated source code:
	go tool cover -html=c.out

Write out an HTML file instead of launching a web browser:
	go tool cover -html=c.out -o coverage.html

Display coverage percentages to stdout for each function:
	go tool cover -func=c.out

Finally, to generate modified source code with coverage annotations
(what go test -cover does):
	go tool cover -mode=set -var=CoverageVariableName program.go
`
|
||||||
|
|
||||||
|
// usage prints the usage message and flag defaults to stderr, then exits
// with status 2.
func usage() {
	fmt.Fprintln(os.Stderr, usageMessage)
	fmt.Fprintln(os.Stderr, "Flags:")
	flag.PrintDefaults()
	fmt.Fprintln(os.Stderr, "\n Only one of -html, -func, or -mode may be set.")
	os.Exit(2)
}
|
||||||
|
|
||||||
|
// Command-line flags; exactly one of -html, -func, or -mode may be used
// (validated in parseFlags).
var (
	mode    = flag.String("mode", "", "coverage mode: set, count, atomic")
	varVar  = flag.String("var", "GoCover", "name of coverage variable to generate")
	output  = flag.String("o", "", "file for output; default: stdout")
	htmlOut = flag.String("html", "", "generate HTML representation of coverage profile")
	funcOut = flag.String("func", "", "output coverage profile information for each function")
)
|
||||||
|
|
||||||
|
var profile string // The profile to read; the value of -html or -func

// counterStmt generates the statement that records a block visit; it is
// assigned by parseFlags according to -mode.
var counterStmt func(*File, ast.Expr) ast.Stmt

// Names used when injecting the sync/atomic import for -mode=atomic.
const (
	atomicPackagePath = "sync/atomic"
	atomicPackageName = "_cover_atomic_"
)
|
||||||
|
|
||||||
|
// main dispatches between the cover tool's two jobs: annotating a source
// file (-mode) and reporting on an existing profile (-html / -func).
func main() {
	flag.Usage = usage
	flag.Parse()

	// Usage information when no arguments.
	if flag.NFlag() == 0 && flag.NArg() == 0 {
		flag.Usage()
	}

	err := parseFlags()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		fmt.Fprintln(os.Stderr, `For usage information, run "go tool cover -help"`)
		os.Exit(2)
	}

	// Generate coverage-annotated source.
	if *mode != "" {
		annotate(flag.Arg(0))
		return
	}

	// Output HTML or function coverage information.
	if *htmlOut != "" {
		err = htmlOutput(profile, *output)
	} else {
		err = funcOutput(profile, *output)
	}

	if err != nil {
		fmt.Fprintf(os.Stderr, "cover: %v\n", err)
		os.Exit(2)
	}
}
|
||||||
|
|
||||||
|
// parseFlags sets the profile and counterStmt globals and performs validations.
// It returns an error when the flag/argument combination is inconsistent.
func parseFlags() error {
	profile = *htmlOut
	if *funcOut != "" {
		if profile != "" {
			return fmt.Errorf("too many options")
		}
		profile = *funcOut
	}

	// Must either display a profile or rewrite Go source.
	if (profile == "") == (*mode == "") {
		return fmt.Errorf("too many options")
	}

	if *mode != "" {
		// Select the counter-statement generator for the chosen mode.
		switch *mode {
		case "set":
			counterStmt = setCounterStmt
		case "count":
			counterStmt = incCounterStmt
		case "atomic":
			counterStmt = atomicCounterStmt
		default:
			return fmt.Errorf("unknown -mode %v", *mode)
		}

		// Annotation takes exactly one source file argument.
		if flag.NArg() == 0 {
			return fmt.Errorf("missing source file")
		} else if flag.NArg() == 1 {
			return nil
		}
	} else if flag.NArg() == 0 {
		return nil
	}
	return fmt.Errorf("too many arguments")
}
|
||||||
|
|
||||||
|
// Block represents the information about a basic block to be recorded in the analysis.
// Note: Our definition of basic block is based on control structures; we don't break
// apart && and ||. We could but it doesn't seem important enough to bother.
type Block struct {
	startByte token.Pos // position of the block's first byte
	endByte   token.Pos // position just past the block
	numStmt   int       // number of statements in the block
}
|
||||||
|
|
||||||
|
// File is a wrapper for the state of a file used in the parser.
// The basic parse tree walker is a method of this type.
type File struct {
	fset      *token.FileSet // positions for astFile
	name      string         // Name of file.
	astFile   *ast.File      // parsed file being annotated
	blocks    []Block        // basic blocks found so far
	atomicPkg string         // Package name for "sync/atomic" in this file.
}
|
||||||
|
|
||||||
|
// Visit implements the ast.Visitor interface.
// It rewrites the tree in place, inserting counter statements into each
// basic block it encounters.
func (f *File) Visit(node ast.Node) ast.Visitor {
	switch n := node.(type) {
	case *ast.BlockStmt:
		// If it's a switch or select, the body is a list of case clauses; don't tag the block itself.
		if len(n.List) > 0 {
			switch n.List[0].(type) {
			case *ast.CaseClause: // switch
				for _, n := range n.List {
					clause := n.(*ast.CaseClause)
					clause.Body = f.addCounters(clause.Pos(), clause.End(), clause.Body, false)
				}
				return f
			case *ast.CommClause: // select
				for _, n := range n.List {
					clause := n.(*ast.CommClause)
					clause.Body = f.addCounters(clause.Pos(), clause.End(), clause.Body, false)
				}
				return f
			}
		}
		n.List = f.addCounters(n.Lbrace, n.Rbrace+1, n.List, true) // +1 to step past closing brace.
	case *ast.IfStmt:
		ast.Walk(f, n.Body)
		if n.Else == nil {
			return nil
		}
		// The elses are special, because if we have
		//	if x {
		//	} else if y {
		//	}
		// we want to cover the "if y". To do this, we need a place to drop the counter,
		// so we add a hidden block:
		//	if x {
		//	} else {
		//		if y {
		//		}
		//	}
		switch stmt := n.Else.(type) {
		case *ast.IfStmt:
			block := &ast.BlockStmt{
				Lbrace: n.Body.End(), // Start at end of the "if" block so the covered part looks like it starts at the "else".
				List:   []ast.Stmt{stmt},
				Rbrace: stmt.End(),
			}
			n.Else = block
		case *ast.BlockStmt:
			stmt.Lbrace = n.Body.End() // Start at end of the "if" block so the covered part looks like it starts at the "else".
		default:
			panic("unexpected node type in if")
		}
		ast.Walk(f, n.Else)
		return nil
	case *ast.SelectStmt:
		// Don't annotate an empty select - creates a syntax error.
		if n.Body == nil || len(n.Body.List) == 0 {
			return nil
		}
	case *ast.SwitchStmt:
		// Don't annotate an empty switch - creates a syntax error.
		if n.Body == nil || len(n.Body.List) == 0 {
			return nil
		}
	case *ast.TypeSwitchStmt:
		// Don't annotate an empty type switch - creates a syntax error.
		if n.Body == nil || len(n.Body.List) == 0 {
			return nil
		}
	}
	return f
}
|
||||||
|
|
||||||
|
// unquote returns s with its Go quoting removed. It terminates the program
// if s is not a valid quoted string.
func unquote(s string) string {
	if t, err := strconv.Unquote(s); err == nil {
		return t
	}
	log.Fatalf("cover: improperly quoted string %q\n", s)
	return "" // unreachable; log.Fatalf exits the process
}
|
||||||
|
|
||||||
|
// addImport adds an import for the specified path, if one does not already exist, and returns
// the local package name. A newly added import is aliased _cover_atomic_ and
// becomes the file's first declaration; a "var _ = ..." reference is appended
// so the import can never be reported as unused.
func (f *File) addImport(path string) string {
	// Does the package already import it?
	for _, s := range f.astFile.Imports {
		if unquote(s.Path.Value) == path {
			if s.Name != nil {
				return s.Name.Name
			}
			return filepath.Base(path)
		}
	}
	newImport := &ast.ImportSpec{
		Name: ast.NewIdent(atomicPackageName),
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: fmt.Sprintf("%q", path),
		},
	}
	impDecl := &ast.GenDecl{
		Tok: token.IMPORT,
		Specs: []ast.Spec{
			newImport,
		},
	}
	// Make the new import the first Decl in the file.
	astFile := f.astFile
	astFile.Decls = append(astFile.Decls, nil)
	copy(astFile.Decls[1:], astFile.Decls[0:])
	astFile.Decls[0] = impDecl
	astFile.Imports = append(astFile.Imports, newImport)

	// Now refer to the package, just in case it ends up unused.
	// That is, append to the end of the file the declaration
	//	var _ = _cover_atomic_.AddUint32
	reference := &ast.GenDecl{
		Tok: token.VAR,
		Specs: []ast.Spec{
			&ast.ValueSpec{
				Names: []*ast.Ident{
					ast.NewIdent("_"),
				},
				Values: []ast.Expr{
					&ast.SelectorExpr{
						X:   ast.NewIdent(atomicPackageName),
						Sel: ast.NewIdent("AddUint32"),
					},
				},
			},
		},
	}
	astFile.Decls = append(astFile.Decls, reference)
	return atomicPackageName
}
|
||||||
|
|
||||||
|
// slashslash marks the start of a line comment.
var slashslash = []byte("//")

// initialComments returns the prefix of content containing only whitespace
// and line comments. Any +build directives must appear within this region;
// printing it verbatim is more reliable than round-tripping comments through
// go/printer. Derived from go/build.Context.shouldBuild.
func initialComments(content []byte) []byte {
	end := 0
	rest := content
	for len(rest) > 0 {
		line := rest
		if nl := bytes.IndexByte(line, '\n'); nl >= 0 {
			line, rest = line[:nl], rest[nl+1:]
		} else {
			rest = rest[len(rest):]
		}
		line = bytes.TrimSpace(line)
		if len(line) == 0 {
			// Blank line: extend the retained prefix past it. Only blank
			// lines advance end, so the prefix always ends at a blank line.
			end = len(content) - len(rest)
			continue
		}
		if !bytes.HasPrefix(line, slashslash) {
			// First real code line: the comment prefix is over.
			break
		}
	}
	return content[:end]
}
|
||||||
|
|
||||||
|
func annotate(name string) {
|
||||||
|
fset := token.NewFileSet()
|
||||||
|
content, err := ioutil.ReadFile(name)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("cover: %s: %s", name, err)
|
||||||
|
}
|
||||||
|
parsedFile, err := parser.ParseFile(fset, name, content, parser.ParseComments)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("cover: %s: %s", name, err)
|
||||||
|
}
|
||||||
|
parsedFile.Comments = trimComments(parsedFile, fset)
|
||||||
|
|
||||||
|
file := &File{
|
||||||
|
fset: fset,
|
||||||
|
name: name,
|
||||||
|
astFile: parsedFile,
|
||||||
|
}
|
||||||
|
if *mode == "atomic" {
|
||||||
|
file.atomicPkg = file.addImport(atomicPackagePath)
|
||||||
|
}
|
||||||
|
ast.Walk(file, file.astFile)
|
||||||
|
fd := os.Stdout
|
||||||
|
if *output != "" {
|
||||||
|
var err error
|
||||||
|
fd, err = os.Create(*output)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("cover: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fd.Write(initialComments(content)) // Retain '// +build' directives.
|
||||||
|
file.print(fd)
|
||||||
|
// After printing the source tree, add some declarations for the counters etc.
|
||||||
|
// We could do this by adding to the tree, but it's easier just to print the text.
|
||||||
|
file.addVariables(fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// trimComments drops all but the //go: comments, some of which are semantically important.
|
||||||
|
// We drop all others because they can appear in places that cause our counters
|
||||||
|
// to appear in syntactically incorrect places. //go: appears at the beginning of
|
||||||
|
// the line and is syntactically safe.
|
||||||
|
func trimComments(file *ast.File, fset *token.FileSet) []*ast.CommentGroup {
|
||||||
|
var comments []*ast.CommentGroup
|
||||||
|
for _, group := range file.Comments {
|
||||||
|
var list []*ast.Comment
|
||||||
|
for _, comment := range group.List {
|
||||||
|
if strings.HasPrefix(comment.Text, "//go:") && fset.Position(comment.Slash).Column == 1 {
|
||||||
|
list = append(list, comment)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if list != nil {
|
||||||
|
comments = append(comments, &ast.CommentGroup{List: list})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return comments
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) print(w io.Writer) {
|
||||||
|
printer.Fprint(w, f.fset, f.astFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
// intLiteral returns an ast.BasicLit representing the integer value.
|
||||||
|
func (f *File) intLiteral(i int) *ast.BasicLit {
|
||||||
|
node := &ast.BasicLit{
|
||||||
|
Kind: token.INT,
|
||||||
|
Value: fmt.Sprint(i),
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// index returns an ast.BasicLit representing the number of counters present.
|
||||||
|
func (f *File) index() *ast.BasicLit {
|
||||||
|
return f.intLiteral(len(f.blocks))
|
||||||
|
}
|
||||||
|
|
||||||
|
// setCounterStmt returns the expression: __count[23] = 1.
|
||||||
|
func setCounterStmt(f *File, counter ast.Expr) ast.Stmt {
|
||||||
|
return &ast.AssignStmt{
|
||||||
|
Lhs: []ast.Expr{counter},
|
||||||
|
Tok: token.ASSIGN,
|
||||||
|
Rhs: []ast.Expr{f.intLiteral(1)},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// incCounterStmt returns the expression: __count[23]++.
|
||||||
|
func incCounterStmt(f *File, counter ast.Expr) ast.Stmt {
|
||||||
|
return &ast.IncDecStmt{
|
||||||
|
X: counter,
|
||||||
|
Tok: token.INC,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// atomicCounterStmt returns the expression: atomic.AddUint32(&__count[23], 1)
|
||||||
|
func atomicCounterStmt(f *File, counter ast.Expr) ast.Stmt {
|
||||||
|
return &ast.ExprStmt{
|
||||||
|
X: &ast.CallExpr{
|
||||||
|
Fun: &ast.SelectorExpr{
|
||||||
|
X: ast.NewIdent(f.atomicPkg),
|
||||||
|
Sel: ast.NewIdent("AddUint32"),
|
||||||
|
},
|
||||||
|
Args: []ast.Expr{&ast.UnaryExpr{
|
||||||
|
Op: token.AND,
|
||||||
|
X: counter,
|
||||||
|
},
|
||||||
|
f.intLiteral(1),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newCounter creates a new counter expression of the appropriate form.
|
||||||
|
func (f *File) newCounter(start, end token.Pos, numStmt int) ast.Stmt {
|
||||||
|
counter := &ast.IndexExpr{
|
||||||
|
X: &ast.SelectorExpr{
|
||||||
|
X: ast.NewIdent(*varVar),
|
||||||
|
Sel: ast.NewIdent("Count"),
|
||||||
|
},
|
||||||
|
Index: f.index(),
|
||||||
|
}
|
||||||
|
stmt := counterStmt(f, counter)
|
||||||
|
f.blocks = append(f.blocks, Block{start, end, numStmt})
|
||||||
|
return stmt
|
||||||
|
}
|
||||||
|
|
||||||
|
// addCounters takes a list of statements and adds counters to the beginning of
|
||||||
|
// each basic block at the top level of that list. For instance, given
|
||||||
|
//
|
||||||
|
// S1
|
||||||
|
// if cond {
|
||||||
|
// S2
|
||||||
|
// }
|
||||||
|
// S3
|
||||||
|
//
|
||||||
|
// counters will be added before S1 and before S3. The block containing S2
|
||||||
|
// will be visited in a separate call.
|
||||||
|
// TODO: Nested simple blocks get unnecessary (but correct) counters
|
||||||
|
func (f *File) addCounters(pos, blockEnd token.Pos, list []ast.Stmt, extendToClosingBrace bool) []ast.Stmt {
|
||||||
|
// Special case: make sure we add a counter to an empty block. Can't do this below
|
||||||
|
// or we will add a counter to an empty statement list after, say, a return statement.
|
||||||
|
if len(list) == 0 {
|
||||||
|
return []ast.Stmt{f.newCounter(pos, blockEnd, 0)}
|
||||||
|
}
|
||||||
|
// We have a block (statement list), but it may have several basic blocks due to the
|
||||||
|
// appearance of statements that affect the flow of control.
|
||||||
|
var newList []ast.Stmt
|
||||||
|
for {
|
||||||
|
// Find first statement that affects flow of control (break, continue, if, etc.).
|
||||||
|
// It will be the last statement of this basic block.
|
||||||
|
var last int
|
||||||
|
end := blockEnd
|
||||||
|
for last = 0; last < len(list); last++ {
|
||||||
|
end = f.statementBoundary(list[last])
|
||||||
|
if f.endsBasicSourceBlock(list[last]) {
|
||||||
|
extendToClosingBrace = false // Block is broken up now.
|
||||||
|
last++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if extendToClosingBrace {
|
||||||
|
end = blockEnd
|
||||||
|
}
|
||||||
|
if pos != end { // Can have no source to cover if e.g. blocks abut.
|
||||||
|
newList = append(newList, f.newCounter(pos, end, last))
|
||||||
|
}
|
||||||
|
newList = append(newList, list[0:last]...)
|
||||||
|
list = list[last:]
|
||||||
|
if len(list) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
pos = list[0].Pos()
|
||||||
|
}
|
||||||
|
return newList
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasFuncLiteral reports the existence and position of the first func literal
|
||||||
|
// in the node, if any. If a func literal appears, it usually marks the termination
|
||||||
|
// of a basic block because the function body is itself a block.
|
||||||
|
// Therefore we draw a line at the start of the body of the first function literal we find.
|
||||||
|
// TODO: what if there's more than one? Probably doesn't matter much.
|
||||||
|
func hasFuncLiteral(n ast.Node) (bool, token.Pos) {
|
||||||
|
if n == nil {
|
||||||
|
return false, 0
|
||||||
|
}
|
||||||
|
var literal funcLitFinder
|
||||||
|
ast.Walk(&literal, n)
|
||||||
|
return literal.found(), token.Pos(literal)
|
||||||
|
}
|
||||||
|
|
||||||
|
// statementBoundary finds the location in s that terminates the current basic
|
||||||
|
// block in the source.
|
||||||
|
func (f *File) statementBoundary(s ast.Stmt) token.Pos {
|
||||||
|
// Control flow statements are easy.
|
||||||
|
switch s := s.(type) {
|
||||||
|
case *ast.BlockStmt:
|
||||||
|
// Treat blocks like basic blocks to avoid overlapping counters.
|
||||||
|
return s.Lbrace
|
||||||
|
case *ast.IfStmt:
|
||||||
|
found, pos := hasFuncLiteral(s.Init)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
found, pos = hasFuncLiteral(s.Cond)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
return s.Body.Lbrace
|
||||||
|
case *ast.ForStmt:
|
||||||
|
found, pos := hasFuncLiteral(s.Init)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
found, pos = hasFuncLiteral(s.Cond)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
found, pos = hasFuncLiteral(s.Post)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
return s.Body.Lbrace
|
||||||
|
case *ast.LabeledStmt:
|
||||||
|
return f.statementBoundary(s.Stmt)
|
||||||
|
case *ast.RangeStmt:
|
||||||
|
found, pos := hasFuncLiteral(s.X)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
return s.Body.Lbrace
|
||||||
|
case *ast.SwitchStmt:
|
||||||
|
found, pos := hasFuncLiteral(s.Init)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
found, pos = hasFuncLiteral(s.Tag)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
return s.Body.Lbrace
|
||||||
|
case *ast.SelectStmt:
|
||||||
|
return s.Body.Lbrace
|
||||||
|
case *ast.TypeSwitchStmt:
|
||||||
|
found, pos := hasFuncLiteral(s.Init)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
return s.Body.Lbrace
|
||||||
|
}
|
||||||
|
// If not a control flow statement, it is a declaration, expression, call, etc. and it may have a function literal.
|
||||||
|
// If it does, that's tricky because we want to exclude the body of the function from this block.
|
||||||
|
// Draw a line at the start of the body of the first function literal we find.
|
||||||
|
// TODO: what if there's more than one? Probably doesn't matter much.
|
||||||
|
found, pos := hasFuncLiteral(s)
|
||||||
|
if found {
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
return s.End()
|
||||||
|
}
|
||||||
|
|
||||||
|
// endsBasicSourceBlock reports whether s changes the flow of control: break, if, etc.,
|
||||||
|
// or if it's just problematic, for instance contains a function literal, which will complicate
|
||||||
|
// accounting due to the block-within-an expression.
|
||||||
|
func (f *File) endsBasicSourceBlock(s ast.Stmt) bool {
|
||||||
|
switch s := s.(type) {
|
||||||
|
case *ast.BlockStmt:
|
||||||
|
// Treat blocks like basic blocks to avoid overlapping counters.
|
||||||
|
return true
|
||||||
|
case *ast.BranchStmt:
|
||||||
|
return true
|
||||||
|
case *ast.ForStmt:
|
||||||
|
return true
|
||||||
|
case *ast.IfStmt:
|
||||||
|
return true
|
||||||
|
case *ast.LabeledStmt:
|
||||||
|
return f.endsBasicSourceBlock(s.Stmt)
|
||||||
|
case *ast.RangeStmt:
|
||||||
|
return true
|
||||||
|
case *ast.SwitchStmt:
|
||||||
|
return true
|
||||||
|
case *ast.SelectStmt:
|
||||||
|
return true
|
||||||
|
case *ast.TypeSwitchStmt:
|
||||||
|
return true
|
||||||
|
case *ast.ExprStmt:
|
||||||
|
// Calls to panic change the flow.
|
||||||
|
// We really should verify that "panic" is the predefined function,
|
||||||
|
// but without type checking we can't and the likelihood of it being
|
||||||
|
// an actual problem is vanishingly small.
|
||||||
|
if call, ok := s.X.(*ast.CallExpr); ok {
|
||||||
|
if ident, ok := call.Fun.(*ast.Ident); ok && ident.Name == "panic" && len(call.Args) == 1 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
found, _ := hasFuncLiteral(s)
|
||||||
|
return found
|
||||||
|
}
|
||||||
|
|
||||||
|
// funcLitFinder implements the ast.Visitor pattern to find the location of any
|
||||||
|
// function literal in a subtree.
|
||||||
|
type funcLitFinder token.Pos
|
||||||
|
|
||||||
|
func (f *funcLitFinder) Visit(node ast.Node) (w ast.Visitor) {
|
||||||
|
if f.found() {
|
||||||
|
return nil // Prune search.
|
||||||
|
}
|
||||||
|
switch n := node.(type) {
|
||||||
|
case *ast.FuncLit:
|
||||||
|
*f = funcLitFinder(n.Body.Lbrace)
|
||||||
|
return nil // Prune search.
|
||||||
|
}
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *funcLitFinder) found() bool {
|
||||||
|
return token.Pos(*f) != token.NoPos
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort interface for []block1; used for self-check in addVariables.
|
||||||
|
|
||||||
|
type block1 struct {
|
||||||
|
Block
|
||||||
|
index int
|
||||||
|
}
|
||||||
|
|
||||||
|
type blockSlice []block1
|
||||||
|
|
||||||
|
func (b blockSlice) Len() int { return len(b) }
|
||||||
|
func (b blockSlice) Less(i, j int) bool { return b[i].startByte < b[j].startByte }
|
||||||
|
func (b blockSlice) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||||
|
|
||||||
|
// offset translates a token position into a 0-indexed byte offset.
|
||||||
|
func (f *File) offset(pos token.Pos) int {
|
||||||
|
return f.fset.Position(pos).Offset
|
||||||
|
}
|
||||||
|
|
||||||
|
// addVariables adds to the end of the file the declarations to set up the counter and position variables.
|
||||||
|
func (f *File) addVariables(w io.Writer) {
|
||||||
|
// Self-check: Verify that the instrumented basic blocks are disjoint.
|
||||||
|
t := make([]block1, len(f.blocks))
|
||||||
|
for i := range f.blocks {
|
||||||
|
t[i].Block = f.blocks[i]
|
||||||
|
t[i].index = i
|
||||||
|
}
|
||||||
|
sort.Sort(blockSlice(t))
|
||||||
|
for i := 1; i < len(t); i++ {
|
||||||
|
if t[i-1].endByte > t[i].startByte {
|
||||||
|
fmt.Fprintf(os.Stderr, "cover: internal error: block %d overlaps block %d\n", t[i-1].index, t[i].index)
|
||||||
|
// Note: error message is in byte positions, not token positions.
|
||||||
|
fmt.Fprintf(os.Stderr, "\t%s:#%d,#%d %s:#%d,#%d\n",
|
||||||
|
f.name, f.offset(t[i-1].startByte), f.offset(t[i-1].endByte),
|
||||||
|
f.name, f.offset(t[i].startByte), f.offset(t[i].endByte))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Declare the coverage struct as a package-level variable.
|
||||||
|
fmt.Fprintf(w, "\nvar %s = struct {\n", *varVar)
|
||||||
|
fmt.Fprintf(w, "\tCount [%d]uint32\n", len(f.blocks))
|
||||||
|
fmt.Fprintf(w, "\tPos [3 * %d]uint32\n", len(f.blocks))
|
||||||
|
fmt.Fprintf(w, "\tNumStmt [%d]uint16\n", len(f.blocks))
|
||||||
|
fmt.Fprintf(w, "} {\n")
|
||||||
|
|
||||||
|
// Initialize the position array field.
|
||||||
|
fmt.Fprintf(w, "\tPos: [3 * %d]uint32{\n", len(f.blocks))
|
||||||
|
|
||||||
|
// A nice long list of positions. Each position is encoded as follows to reduce size:
|
||||||
|
// - 32-bit starting line number
|
||||||
|
// - 32-bit ending line number
|
||||||
|
// - (16 bit ending column number << 16) | (16-bit starting column number).
|
||||||
|
for i, block := range f.blocks {
|
||||||
|
start := f.fset.Position(block.startByte)
|
||||||
|
end := f.fset.Position(block.endByte)
|
||||||
|
fmt.Fprintf(w, "\t\t%d, %d, %#x, // [%d]\n", start.Line, end.Line, (end.Column&0xFFFF)<<16|(start.Column&0xFFFF), i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close the position array.
|
||||||
|
fmt.Fprintf(w, "\t},\n")
|
||||||
|
|
||||||
|
// Initialize the position array field.
|
||||||
|
fmt.Fprintf(w, "\tNumStmt: [%d]uint16{\n", len(f.blocks))
|
||||||
|
|
||||||
|
// A nice long list of statements-per-block, so we can give a conventional
|
||||||
|
// valuation of "percent covered". To save space, it's a 16-bit number, so we
|
||||||
|
// clamp it if it overflows - won't matter in practice.
|
||||||
|
for i, block := range f.blocks {
|
||||||
|
n := block.numStmt
|
||||||
|
if n > 1<<16-1 {
|
||||||
|
n = 1<<16 - 1
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "\t\t%d, // %d\n", n, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close the statements-per-block array.
|
||||||
|
fmt.Fprintf(w, "\t},\n")
|
||||||
|
|
||||||
|
// Close the struct initialization.
|
||||||
|
fmt.Fprintf(w, "}\n")
|
||||||
|
}
|
|
@ -0,0 +1,27 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Cover is a program for analyzing the coverage profiles generated by
|
||||||
|
'go test -coverprofile=cover.out'.
|
||||||
|
|
||||||
|
Cover is also used by 'go test -cover' to rewrite the source code with
|
||||||
|
annotations to track which parts of each function are executed.
|
||||||
|
It operates on one Go source file at a time, computing approximate
|
||||||
|
basic block information by studying the source. It is thus more portable
|
||||||
|
than binary-rewriting coverage tools, but also a little less capable.
|
||||||
|
For instance, it does not probe inside && and || expressions, and can
|
||||||
|
be mildly confused by single statements with multiple function literals.
|
||||||
|
|
||||||
|
For usage information, please see:
|
||||||
|
go help testflag
|
||||||
|
go tool cover -help
|
||||||
|
|
||||||
|
No longer maintained:
|
||||||
|
|
||||||
|
For Go releases 1.5 and later, this tool lives in the
|
||||||
|
standard repository. The code here is not maintained.
|
||||||
|
|
||||||
|
*/
|
||||||
|
package main // import "golang.org/x/tools/cmd/cover"
|
|
@ -0,0 +1,166 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// This file implements the visitor that computes the (line, column)-(line-column) range for each function.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/build"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"text/tabwriter"
|
||||||
|
|
||||||
|
"golang.org/x/tools/cover"
|
||||||
|
)
|
||||||
|
|
||||||
|
// funcOutput takes two file names as arguments, a coverage profile to read as input and an output
|
||||||
|
// file to write ("" means to write to standard output). The function reads the profile and produces
|
||||||
|
// as output the coverage data broken down by function, like this:
|
||||||
|
//
|
||||||
|
// fmt/format.go:30: init 100.0%
|
||||||
|
// fmt/format.go:57: clearflags 100.0%
|
||||||
|
// ...
|
||||||
|
// fmt/scan.go:1046: doScan 100.0%
|
||||||
|
// fmt/scan.go:1075: advance 96.2%
|
||||||
|
// fmt/scan.go:1119: doScanf 96.8%
|
||||||
|
// total: (statements) 91.9%
|
||||||
|
|
||||||
|
func funcOutput(profile, outputFile string) error {
|
||||||
|
profiles, err := cover.ParseProfiles(profile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var out *bufio.Writer
|
||||||
|
if outputFile == "" {
|
||||||
|
out = bufio.NewWriter(os.Stdout)
|
||||||
|
} else {
|
||||||
|
fd, err := os.Create(outputFile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer fd.Close()
|
||||||
|
out = bufio.NewWriter(fd)
|
||||||
|
}
|
||||||
|
defer out.Flush()
|
||||||
|
|
||||||
|
tabber := tabwriter.NewWriter(out, 1, 8, 1, '\t', 0)
|
||||||
|
defer tabber.Flush()
|
||||||
|
|
||||||
|
var total, covered int64
|
||||||
|
for _, profile := range profiles {
|
||||||
|
fn := profile.FileName
|
||||||
|
file, err := findFile(fn)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
funcs, err := findFuncs(file)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Now match up functions and profile blocks.
|
||||||
|
for _, f := range funcs {
|
||||||
|
c, t := f.coverage(profile)
|
||||||
|
fmt.Fprintf(tabber, "%s:%d:\t%s\t%.1f%%\n", fn, f.startLine, f.name, 100.0*float64(c)/float64(t))
|
||||||
|
total += t
|
||||||
|
covered += c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(tabber, "total:\t(statements)\t%.1f%%\n", 100.0*float64(covered)/float64(total))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// findFuncs parses the file and returns a slice of FuncExtent descriptors.
|
||||||
|
func findFuncs(name string) ([]*FuncExtent, error) {
|
||||||
|
fset := token.NewFileSet()
|
||||||
|
parsedFile, err := parser.ParseFile(fset, name, nil, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
visitor := &FuncVisitor{
|
||||||
|
fset: fset,
|
||||||
|
name: name,
|
||||||
|
astFile: parsedFile,
|
||||||
|
}
|
||||||
|
ast.Walk(visitor, visitor.astFile)
|
||||||
|
return visitor.funcs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FuncExtent describes a function's extent in the source by file and
// 1-based line/column positions.
type FuncExtent struct {
	name      string // Function name.
	startLine int
	startCol  int
	endLine   int
	endCol    int
}
|
||||||
|
|
||||||
|
// FuncVisitor implements the visitor that builds the function position list for a file.
|
||||||
|
type FuncVisitor struct {
|
||||||
|
fset *token.FileSet
|
||||||
|
name string // Name of file.
|
||||||
|
astFile *ast.File
|
||||||
|
funcs []*FuncExtent
|
||||||
|
}
|
||||||
|
|
||||||
|
// Visit implements the ast.Visitor interface.
|
||||||
|
func (v *FuncVisitor) Visit(node ast.Node) ast.Visitor {
|
||||||
|
switch n := node.(type) {
|
||||||
|
case *ast.FuncDecl:
|
||||||
|
start := v.fset.Position(n.Pos())
|
||||||
|
end := v.fset.Position(n.End())
|
||||||
|
fe := &FuncExtent{
|
||||||
|
name: n.Name.Name,
|
||||||
|
startLine: start.Line,
|
||||||
|
startCol: start.Column,
|
||||||
|
endLine: end.Line,
|
||||||
|
endCol: end.Column,
|
||||||
|
}
|
||||||
|
v.funcs = append(v.funcs, fe)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// coverage returns the fraction of the statements in the function that were covered, as a numerator and denominator.
|
||||||
|
func (f *FuncExtent) coverage(profile *cover.Profile) (num, den int64) {
|
||||||
|
// We could avoid making this n^2 overall by doing a single scan and annotating the functions,
|
||||||
|
// but the sizes of the data structures is never very large and the scan is almost instantaneous.
|
||||||
|
var covered, total int64
|
||||||
|
// The blocks are sorted, so we can stop counting as soon as we reach the end of the relevant block.
|
||||||
|
for _, b := range profile.Blocks {
|
||||||
|
if b.StartLine > f.endLine || (b.StartLine == f.endLine && b.StartCol >= f.endCol) {
|
||||||
|
// Past the end of the function.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if b.EndLine < f.startLine || (b.EndLine == f.startLine && b.EndCol <= f.startCol) {
|
||||||
|
// Before the beginning of the function
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
total += int64(b.NumStmt)
|
||||||
|
if b.Count > 0 {
|
||||||
|
covered += int64(b.NumStmt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if total == 0 {
|
||||||
|
total = 1 // Avoid zero denominator.
|
||||||
|
}
|
||||||
|
return covered, total
|
||||||
|
}
|
||||||
|
|
||||||
|
// findFile finds the location of the named file in GOROOT, GOPATH etc.,
// resolving the profile's package-relative path to an absolute one.
func findFile(file string) (string, error) {
	dir, base := filepath.Split(file)
	pkg, err := build.Import(dir, ".", build.FindOnly)
	if err != nil {
		return "", fmt.Errorf("can't find %q: %v", base, err)
	}
	return filepath.Join(pkg.Dir, base), nil
}
|
|
@ -0,0 +1,284 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"html/template"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"golang.org/x/tools/cover"
|
||||||
|
)
|
||||||
|
|
||||||
|
// htmlOutput reads the profile data from profile and generates an HTML
|
||||||
|
// coverage report, writing it to outfile. If outfile is empty,
|
||||||
|
// it writes the report to a temporary file and opens it in a web browser.
|
||||||
|
func htmlOutput(profile, outfile string) error {
|
||||||
|
profiles, err := cover.ParseProfiles(profile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var d templateData
|
||||||
|
|
||||||
|
for _, profile := range profiles {
|
||||||
|
fn := profile.FileName
|
||||||
|
if profile.Mode == "set" {
|
||||||
|
d.Set = true
|
||||||
|
}
|
||||||
|
file, err := findFile(fn)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
src, err := ioutil.ReadFile(file)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("can't read %q: %v", fn, err)
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
err = htmlGen(&buf, src, profile.Boundaries(src))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d.Files = append(d.Files, &templateFile{
|
||||||
|
Name: fn,
|
||||||
|
Body: template.HTML(buf.String()),
|
||||||
|
Coverage: percentCovered(profile),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
var out *os.File
|
||||||
|
if outfile == "" {
|
||||||
|
var dir string
|
||||||
|
dir, err = ioutil.TempDir("", "cover")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
out, err = os.Create(filepath.Join(dir, "coverage.html"))
|
||||||
|
} else {
|
||||||
|
out, err = os.Create(outfile)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = htmlTemplate.Execute(out, d)
|
||||||
|
if err == nil {
|
||||||
|
err = out.Close()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if outfile == "" {
|
||||||
|
if !startBrowser("file://" + out.Name()) {
|
||||||
|
fmt.Fprintf(os.Stderr, "HTML output written to %s\n", out.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// percentCovered returns, as a percentage, the fraction of the statements in
|
||||||
|
// the profile covered by the test run.
|
||||||
|
// In effect, it reports the coverage of a given source file.
|
||||||
|
func percentCovered(p *cover.Profile) float64 {
|
||||||
|
var total, covered int64
|
||||||
|
for _, b := range p.Blocks {
|
||||||
|
total += int64(b.NumStmt)
|
||||||
|
if b.Count > 0 {
|
||||||
|
covered += int64(b.NumStmt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if total == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return float64(covered) / float64(total) * 100
|
||||||
|
}
|
||||||
|
|
||||||
|
// htmlGen generates an HTML coverage report with the provided filename,
|
||||||
|
// source code, and tokens, and writes it to the given Writer.
|
||||||
|
func htmlGen(w io.Writer, src []byte, boundaries []cover.Boundary) error {
|
||||||
|
dst := bufio.NewWriter(w)
|
||||||
|
for i := range src {
|
||||||
|
for len(boundaries) > 0 && boundaries[0].Offset == i {
|
||||||
|
b := boundaries[0]
|
||||||
|
if b.Start {
|
||||||
|
n := 0
|
||||||
|
if b.Count > 0 {
|
||||||
|
n = int(math.Floor(b.Norm*9)) + 1
|
||||||
|
}
|
||||||
|
fmt.Fprintf(dst, `<span class="cov%v" title="%v">`, n, b.Count)
|
||||||
|
} else {
|
||||||
|
dst.WriteString("</span>")
|
||||||
|
}
|
||||||
|
boundaries = boundaries[1:]
|
||||||
|
}
|
||||||
|
switch b := src[i]; b {
|
||||||
|
case '>':
|
||||||
|
dst.WriteString(">")
|
||||||
|
case '<':
|
||||||
|
dst.WriteString("<")
|
||||||
|
case '&':
|
||||||
|
dst.WriteString("&")
|
||||||
|
case '\t':
|
||||||
|
dst.WriteString(" ")
|
||||||
|
default:
|
||||||
|
dst.WriteByte(b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dst.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// startBrowser tries to open the URL in a browser and reports whether it
// succeeds. Success means only that the launcher process started, not that
// the page actually loaded.
func startBrowser(url string) bool {
	// Pick the platform's URL-opening command.
	var args []string
	switch runtime.GOOS {
	case "darwin":
		args = []string{"open"}
	case "windows":
		args = []string{"cmd", "/c", "start"}
	default:
		args = []string{"xdg-open"}
	}
	cmd := exec.Command(args[0], append(args[1:], url)...)
	return cmd.Start() == nil
}
|
||||||
|
|
||||||
|
// rgb returns a CSS rgb() value for the specified coverage level between
// 0 (no coverage) and 10 (max coverage): red for 0, then a gradient from
// gray toward green.
func rgb(n int) string {
	if n == 0 {
		return "rgb(192, 0, 0)" // Red
	}
	step := n - 1
	r := 128 - 12*step
	g := 128 + 12*step
	b := 128 + 3*step
	return fmt.Sprintf("rgb(%v, %v, %v)", r, g, b)
}
|
||||||
|
|
||||||
|
// colors generates the CSS rules for coverage colors.
|
||||||
|
func colors() template.CSS {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for i := 0; i < 11; i++ {
|
||||||
|
fmt.Fprintf(&buf, ".cov%v { color: %v }\n", i, rgb(i))
|
||||||
|
}
|
||||||
|
return template.CSS(buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
var htmlTemplate = template.Must(template.New("html").Funcs(template.FuncMap{
|
||||||
|
"colors": colors,
|
||||||
|
}).Parse(tmplHTML))
|
||||||
|
|
||||||
|
type templateData struct {
|
||||||
|
Files []*templateFile
|
||||||
|
Set bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// templateFile holds one source file's rendered report: its name, the
// pre-escaped HTML body, and its overall coverage percentage.
type templateFile struct {
	Name     string
	Body     template.HTML
	Coverage float64
}
|
||||||
|
|
||||||
|
// tmplHTML is the text of the coverage report page. It is parsed into
// htmlTemplate above; {{colors}} is replaced by the generated CSS rules,
// and the inline script toggles which file's <pre> block is visible.
const tmplHTML = `
<!DOCTYPE html>
<html>
	<head>
		<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
		<style>
			body {
				background: black;
				color: rgb(80, 80, 80);
			}
			body, pre, #legend span {
				font-family: Menlo, monospace;
				font-weight: bold;
			}
			#topbar {
				background: black;
				position: fixed;
				top: 0; left: 0; right: 0;
				height: 42px;
				border-bottom: 1px solid rgb(80, 80, 80);
			}
			#content {
				margin-top: 50px;
			}
			#nav, #legend {
				float: left;
				margin-left: 10px;
			}
			#legend {
				margin-top: 12px;
			}
			#nav {
				margin-top: 10px;
			}
			#legend span {
				margin: 0 5px;
			}
			{{colors}}
		</style>
	</head>
	<body>
		<div id="topbar">
			<div id="nav">
				<select id="files">
				{{range $i, $f := .Files}}
				<option value="file{{$i}}">{{$f.Name}} ({{printf "%.1f" $f.Coverage}}%)</option>
				{{end}}
				</select>
			</div>
			<div id="legend">
				<span>not tracked</span>
			{{if .Set}}
				<span class="cov0">not covered</span>
				<span class="cov8">covered</span>
			{{else}}
				<span class="cov0">no coverage</span>
				<span class="cov1">low coverage</span>
				<span class="cov2">*</span>
				<span class="cov3">*</span>
				<span class="cov4">*</span>
				<span class="cov5">*</span>
				<span class="cov6">*</span>
				<span class="cov7">*</span>
				<span class="cov8">*</span>
				<span class="cov9">*</span>
				<span class="cov10">high coverage</span>
			{{end}}
			</div>
		</div>
		<div id="content">
		{{range $i, $f := .Files}}
		<pre class="file" id="file{{$i}}" {{if $i}}style="display: none"{{end}}>{{$f.Body}}</pre>
		{{end}}
		</div>
	</body>
	<script>
	(function() {
		var files = document.getElementById('files');
		var visible = document.getElementById('file0');
		files.addEventListener('change', onChange, false);
		function onChange() {
			visible.style.display = 'none';
			visible = document.getElementById(files.value);
			visible.style.display = 'block';
			window.scrollTo(0, 0);
		}
	})();
	</script>
</html>
`
|
|
@ -0,0 +1,643 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer
|
||||||
|
// interface. Given the name of a (signed or unsigned) integer type T that has constants
|
||||||
|
// defined, stringer will create a new self-contained Go source file implementing
|
||||||
|
// func (t T) String() string
|
||||||
|
// The file is created in the same package and directory as the package that defines T.
|
||||||
|
// It has helpful defaults designed for use with go generate.
|
||||||
|
//
|
||||||
|
// Stringer works best with constants that are consecutive values such as created using iota,
|
||||||
|
// but creates good code regardless. In the future it might also provide custom support for
|
||||||
|
// constant sets that are bit patterns.
|
||||||
|
//
|
||||||
|
// For example, given this snippet,
|
||||||
|
//
|
||||||
|
// package painkiller
|
||||||
|
//
|
||||||
|
// type Pill int
|
||||||
|
//
|
||||||
|
// const (
|
||||||
|
// Placebo Pill = iota
|
||||||
|
// Aspirin
|
||||||
|
// Ibuprofen
|
||||||
|
// Paracetamol
|
||||||
|
// Acetaminophen = Paracetamol
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// running this command
|
||||||
|
//
|
||||||
|
// stringer -type=Pill
|
||||||
|
//
|
||||||
|
// in the same directory will create the file pill_string.go, in package painkiller,
|
||||||
|
// containing a definition of
|
||||||
|
//
|
||||||
|
// func (Pill) String() string
|
||||||
|
//
|
||||||
|
// That method will translate the value of a Pill constant to the string representation
|
||||||
|
// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will
|
||||||
|
// print the string "Aspirin".
|
||||||
|
//
|
||||||
|
// Typically this process would be run using go generate, like this:
|
||||||
|
//
|
||||||
|
// //go:generate stringer -type=Pill
|
||||||
|
//
|
||||||
|
// If multiple constants have the same value, the lexically first matching name will
|
||||||
|
// be used (in the example, Acetaminophen will print as "Paracetamol").
|
||||||
|
//
|
||||||
|
// With no arguments, it processes the package in the current directory.
|
||||||
|
// Otherwise, the arguments must name a single directory holding a Go package
|
||||||
|
// or a set of Go source files that represent a single Go package.
|
||||||
|
//
|
||||||
|
// The -type flag accepts a comma-separated list of types so a single run can
|
||||||
|
// generate methods for multiple types. The default output file is t_string.go,
|
||||||
|
// where t is the lower-cased name of the first type listed. It can be overridden
|
||||||
|
// with the -output flag.
|
||||||
|
//
|
||||||
|
package main // import "golang.org/x/tools/cmd/stringer"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/constant"
|
||||||
|
"go/format"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/packages"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Command-line flags. -type is required; everything else has a usable default.
var (
	typeNames   = flag.String("type", "", "comma-separated list of type names; must be set")
	output      = flag.String("output", "", "output file name; default srcdir/<type>_string.go")
	trimprefix  = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names")
	linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present")
	buildTags   = flag.String("tags", "", "comma-separated list of build tags to apply")
)
|
||||||
|
|
||||||
|
// Usage is a replacement usage function for the flags package.
// It writes the command synopsis to stderr and then prints the flag defaults.
func Usage() {
	w := os.Stderr
	fmt.Fprint(w, "Usage of stringer:\n")
	fmt.Fprint(w, "\tstringer [flags] -type T [directory]\n")
	fmt.Fprint(w, "\tstringer [flags] -type T files... # Must be a single package\n")
	fmt.Fprint(w, "For more information, see:\n")
	fmt.Fprint(w, "\thttp://godoc.org/golang.org/x/tools/cmd/stringer\n")
	fmt.Fprint(w, "Flags:\n")
	flag.PrintDefaults()
}
|
||||||
|
|
||||||
|
// main parses the flags, type-checks the target package, generates a
// String method for each requested type, and writes the gofmt-ed result
// to the output file.
func main() {
	log.SetFlags(0)
	log.SetPrefix("stringer: ")
	flag.Usage = Usage
	flag.Parse()
	if len(*typeNames) == 0 {
		// -type is mandatory; exit with the conventional "usage error" code.
		flag.Usage()
		os.Exit(2)
	}
	types := strings.Split(*typeNames, ",")
	var tags []string
	if len(*buildTags) > 0 {
		tags = strings.Split(*buildTags, ",")
	}

	// We accept either one directory or a list of files. Which do we have?
	args := flag.Args()
	if len(args) == 0 {
		// Default: process whole package in current directory.
		args = []string{"."}
	}

	// Parse the package once.
	var dir string
	g := Generator{
		trimPrefix:  *trimprefix,
		lineComment: *linecomment,
	}
	// TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc).
	if len(args) == 1 && isDirectory(args[0]) {
		dir = args[0]
	} else {
		if len(tags) != 0 {
			log.Fatal("-tags option applies only to directories, not when files are specified")
		}
		// Output defaults to the directory of the first named file.
		dir = filepath.Dir(args[0])
	}

	g.parsePackage(args, tags)

	// Print the header and package clause.
	g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " "))
	g.Printf("\n")
	g.Printf("package %s", g.pkg.name)
	g.Printf("\n")
	g.Printf("import \"strconv\"\n") // Used by all methods.

	// Run generate for each type.
	for _, typeName := range types {
		g.generate(typeName)
	}

	// Format the output.
	src := g.format()

	// Write to file.
	outputName := *output
	if outputName == "" {
		// Default: <type>_string.go for the first listed type, lower-cased.
		baseName := fmt.Sprintf("%s_string.go", types[0])
		outputName = filepath.Join(dir, strings.ToLower(baseName))
	}
	err := ioutil.WriteFile(outputName, src, 0644)
	if err != nil {
		log.Fatalf("writing output: %s", err)
	}
}
|
||||||
|
|
||||||
|
// isDirectory reports whether the named file is a directory.
// It exits the program if the file cannot be stat'ed.
func isDirectory(name string) bool {
	fi, err := os.Stat(name)
	if err != nil {
		log.Fatal(err)
	}
	return fi.Mode().IsDir()
}
|
||||||
|
|
||||||
|
// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
	buf bytes.Buffer // Accumulated output.
	pkg *Package     // Package we are scanning.

	trimPrefix  string // prefix stripped from generated constant names (-trimprefix)
	lineComment bool   // use line comment text as the printed name (-linecomment)
}

// Printf appends a formatted string to the generator's output buffer.
func (g *Generator) Printf(format string, args ...interface{}) {
	fmt.Fprintf(&g.buf, format, args...)
}
|
||||||
|
|
||||||
|
// File holds a single parsed file and associated data.
type File struct {
	pkg  *Package  // Package to which this file belongs.
	file *ast.File // Parsed AST.
	// These fields are reset for each type being generated.
	typeName string  // Name of the constant type.
	values   []Value // Accumulator for constant values of that type.

	trimPrefix  string // copied from Generator; prefix stripped from names
	lineComment bool   // copied from Generator; use line comments as names
}
|
||||||
|
|
||||||
|
// Package holds the name, type-checker definitions, and files of the
// single package being scanned.
type Package struct {
	name  string                      // package name, used in the generated package clause
	defs  map[*ast.Ident]types.Object // identifier -> object, from the type checker
	files []*File                     // one entry per syntax file in the package
}
|
||||||
|
|
||||||
|
// parsePackage analyzes the single package constructed from the patterns and tags.
// parsePackage exits if there is an error.
func (g *Generator) parsePackage(patterns []string, tags []string) {
	cfg := &packages.Config{
		Mode: packages.LoadSyntax,
		// TODO: Need to think about constants in test files. Maybe write type_string_test.go
		// in a separate pass? For later.
		Tests: false,
		// NOTE(review): tags are joined with a space here; newer Go toolchains
		// expect a comma-separated -tags value — confirm against the Go
		// versions this tool must support.
		BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))},
	}
	pkgs, err := packages.Load(cfg, patterns...)
	if err != nil {
		log.Fatal(err)
	}
	// The patterns must resolve to exactly one package.
	if len(pkgs) != 1 {
		log.Fatalf("error: %d packages found", len(pkgs))
	}
	g.addPackage(pkgs[0])
}
|
||||||
|
|
||||||
|
// addPackage adds a type checked Package and its syntax files to the generator.
|
||||||
|
func (g *Generator) addPackage(pkg *packages.Package) {
|
||||||
|
g.pkg = &Package{
|
||||||
|
name: pkg.Name,
|
||||||
|
defs: pkg.TypesInfo.Defs,
|
||||||
|
files: make([]*File, len(pkg.Syntax)),
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, file := range pkg.Syntax {
|
||||||
|
g.pkg.files[i] = &File{
|
||||||
|
file: file,
|
||||||
|
pkg: g.pkg,
|
||||||
|
trimPrefix: g.trimPrefix,
|
||||||
|
lineComment: g.lineComment,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// generate produces the String method for the named type, appending the
// generated source to the generator's buffer. It exits if the type has no
// integer constants defined.
func (g *Generator) generate(typeName string) {
	values := make([]Value, 0, 100)
	for _, file := range g.pkg.files {
		// Set the state for this run of the walker.
		file.typeName = typeName
		file.values = nil
		if file.file != nil {
			ast.Inspect(file.file, file.genDecl)
			values = append(values, file.values...)
		}
	}

	if len(values) == 0 {
		log.Fatalf("no values defined for type %s", typeName)
	}
	// Generate code that will fail if the constants change value.
	g.Printf("func _() {\n")
	g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n")
	g.Printf("\t// Re-run the stringer command to generate them again.\n")
	g.Printf("\tvar x [1]struct{}\n")
	for _, v := range values {
		g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str)
	}
	g.Printf("}\n")
	runs := splitIntoRuns(values)
	// The decision of which pattern to use depends on the number of
	// runs in the numbers. If there's only one, it's easy. For more than
	// one, there's a tradeoff between complexity and size of the data
	// and code vs. the simplicity of a map. A map takes more space,
	// but so does the code. The decision here (crossover at 10) is
	// arbitrary, but considers that for large numbers of runs the cost
	// of the linear scan in the switch might become important, and
	// rather than use yet another algorithm such as binary search,
	// we punt and use a map. In any case, the likelihood of a map
	// being necessary for any realistic example other than bitmasks
	// is very low. And bitmasks probably deserve their own analysis,
	// to be done some other day.
	switch {
	case len(runs) == 1:
		g.buildOneRun(runs, typeName)
	case len(runs) <= 10:
		g.buildMultipleRuns(runs, typeName)
	default:
		g.buildMap(runs, typeName)
	}
}
|
||||||
|
|
||||||
|
// splitIntoRuns breaks the values into runs of contiguous sequences.
|
||||||
|
// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}.
|
||||||
|
// The input slice is known to be non-empty.
|
||||||
|
func splitIntoRuns(values []Value) [][]Value {
|
||||||
|
// We use stable sort so the lexically first name is chosen for equal elements.
|
||||||
|
sort.Stable(byValue(values))
|
||||||
|
// Remove duplicates. Stable sort has put the one we want to print first,
|
||||||
|
// so use that one. The String method won't care about which named constant
|
||||||
|
// was the argument, so the first name for the given value is the only one to keep.
|
||||||
|
// We need to do this because identical values would cause the switch or map
|
||||||
|
// to fail to compile.
|
||||||
|
j := 1
|
||||||
|
for i := 1; i < len(values); i++ {
|
||||||
|
if values[i].value != values[i-1].value {
|
||||||
|
values[j] = values[i]
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
values = values[:j]
|
||||||
|
runs := make([][]Value, 0, 10)
|
||||||
|
for len(values) > 0 {
|
||||||
|
// One contiguous sequence per outer loop.
|
||||||
|
i := 1
|
||||||
|
for i < len(values) && values[i].value == values[i-1].value+1 {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
runs = append(runs, values[:i])
|
||||||
|
values = values[i:]
|
||||||
|
}
|
||||||
|
return runs
|
||||||
|
}
|
||||||
|
|
||||||
|
// format returns the gofmt-ed contents of the Generator's buffer.
// On a formatting error it warns and returns the raw buffer so the user
// can still inspect (and compile) the invalid output.
func (g *Generator) format() []byte {
	src, err := format.Source(g.buf.Bytes())
	if err != nil {
		// Should never happen, but can arise when developing this code.
		// The user can compile the output to see the error.
		log.Printf("warning: internal error: invalid Go generated: %s", err)
		log.Printf("warning: compile the package to analyze the error")
		return g.buf.Bytes()
	}
	return src
}
|
||||||
|
|
||||||
|
// Value represents a declared constant.
type Value struct {
	originalName string // The name of the constant.
	name         string // The name with trimmed prefix.
	// The value is stored as a bit pattern alone. The boolean tells us
	// whether to interpret it as an int64 or a uint64; the only place
	// this matters is when sorting.
	// Much of the time the str field is all we need; it is printed
	// by Value.String.
	value  uint64 // Will be converted to int64 when needed.
	signed bool   // Whether the constant is a signed type.
	str    string // The string representation given by the "go/constant" package.
}
|
||||||
|
|
||||||
|
// String returns the constant's value as rendered by the "go/constant"
// package; it is what the generated range checks print.
func (v *Value) String() string {
	return v.str
}
|
||||||
|
|
||||||
|
// byValue lets us sort the constants into increasing order.
// We take care in the Less method to sort in signed or unsigned order,
// as appropriate.
type byValue []Value

func (b byValue) Len() int      { return len(b) }
func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byValue) Less(i, j int) bool {
	// Compare as int64 for signed types: raw uint64 bit patterns would
	// order negative values after positive ones.
	if b[i].signed {
		return int64(b[i].value) < int64(b[j].value)
	}
	return b[i].value < b[j].value
}
|
||||||
|
|
||||||
|
// genDecl processes one declaration clause. It is used as an ast.Inspect
// callback; the boolean result tells the walker whether to descend into
// the node's children. Matching constants of f.typeName are accumulated
// into f.values.
func (f *File) genDecl(node ast.Node) bool {
	decl, ok := node.(*ast.GenDecl)
	if !ok || decl.Tok != token.CONST {
		// We only care about const declarations.
		return true
	}
	// The name of the type of the constants we are declaring.
	// Can change if this is a multi-element declaration.
	typ := ""
	// Loop over the elements of the declaration. Each element is a ValueSpec:
	// a list of names possibly followed by a type, possibly followed by values.
	// If the type and value are both missing, we carry down the type (and value,
	// but the "go/types" package takes care of that).
	for _, spec := range decl.Specs {
		vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST.
		if vspec.Type == nil && len(vspec.Values) > 0 {
			// "X = 1". With no type but a value. If the constant is untyped,
			// skip this vspec and reset the remembered type.
			typ = ""

			// If this is a simple type conversion, remember the type.
			// We don't mind if this is actually a call; a qualified call won't
			// be matched (that will be SelectorExpr, not Ident), and only unusual
			// situations will result in a function call that appears to be
			// a type conversion.
			ce, ok := vspec.Values[0].(*ast.CallExpr)
			if !ok {
				continue
			}
			id, ok := ce.Fun.(*ast.Ident)
			if !ok {
				continue
			}
			typ = id.Name
		}
		if vspec.Type != nil {
			// "X T". We have a type. Remember it.
			ident, ok := vspec.Type.(*ast.Ident)
			if !ok {
				continue
			}
			typ = ident.Name
		}
		if typ != f.typeName {
			// This is not the type we're looking for.
			continue
		}
		// We now have a list of names (from one line of source code) all being
		// declared with the desired type.
		// Grab their names and actual values and store them in f.values.
		for _, name := range vspec.Names {
			if name.Name == "_" {
				// The blank identifier declares no usable constant.
				continue
			}
			// This dance lets the type checker find the values for us. It's a
			// bit tricky: look up the object declared by the name, find its
			// types.Const, and extract its value.
			obj, ok := f.pkg.defs[name]
			if !ok {
				log.Fatalf("no value for constant %s", name)
			}
			info := obj.Type().Underlying().(*types.Basic).Info()
			if info&types.IsInteger == 0 {
				log.Fatalf("can't handle non-integer constant type %s", typ)
			}
			value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST.
			if value.Kind() != constant.Int {
				log.Fatalf("can't happen: constant is not an integer %s", name)
			}
			i64, isInt := constant.Int64Val(value)
			u64, isUint := constant.Uint64Val(value)
			if !isInt && !isUint {
				log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String())
			}
			if !isInt {
				// Store the uint64 bit pattern; Value.signed says how to read it.
				u64 = uint64(i64)
			}
			v := Value{
				originalName: name.Name,
				value:        u64,
				signed:       info&types.IsUnsigned == 0,
				str:          value.String(),
			}
			// -linecomment: a single trailing comment on the spec supplies
			// the printed name; otherwise strip the -trimprefix.
			if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 {
				v.name = strings.TrimSpace(c.Text())
			} else {
				v.name = strings.TrimPrefix(v.originalName, f.trimPrefix)
			}
			f.values = append(f.values, v)
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// Helpers
|
||||||
|
|
||||||
|
// usize returns the number of bits of the smallest unsigned integer
// type that will hold n. Used to create the smallest possible slice of
// integers to use as indexes into the concatenated strings.
func usize(n int) int {
	if n < 1<<8 {
		return 8
	}
	if n < 1<<16 {
		return 16
	}
	// 2^32 is enough constants for anyone.
	return 32
}
|
||||||
|
|
||||||
|
// declareIndexAndNameVars declares the index slices and concatenated names
// strings representing the runs of values.
func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) {
	var indexes, names []string
	for i, run := range runs {
		index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i))
		if len(run) != 1 {
			// A single-element run is matched by an equality case in the
			// generated switch and needs no index slice, only the name.
			indexes = append(indexes, index)
		}
		names = append(names, name)
	}
	g.Printf("const (\n")
	for _, name := range names {
		g.Printf("\t%s\n", name)
	}
	g.Printf(")\n\n")

	if len(indexes) > 0 {
		// The missing newline after "var (" is harmless: the output is
		// run through format.Source before being written out.
		g.Printf("var (")
		for _, index := range indexes {
			g.Printf("\t%s\n", index)
		}
		g.Printf(")\n\n")
	}
}
|
||||||
|
|
||||||
|
// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars
// (no numeric suffix on the generated identifiers).
func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) {
	index, name := g.createIndexAndNameDecl(run, typeName, "")
	g.Printf("const %s\n", name)
	g.Printf("var %s\n", index)
}
|
||||||
|
|
||||||
|
// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var".
func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) {
	b := new(bytes.Buffer)
	indexes := make([]int, len(run))
	for i := range run {
		b.WriteString(run[i].name)
		indexes[i] = b.Len() // cumulative end offset of each name in the blob
	}
	nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String())
	nameLen := b.Len()
	b.Reset()
	// The index array's element width is the smallest uint that can hold
	// the total length of the concatenated names.
	fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen))
	for i, v := range indexes {
		if i > 0 {
			fmt.Fprintf(b, ", ")
		}
		fmt.Fprintf(b, "%d", v)
	}
	fmt.Fprintf(b, "}")
	return b.String(), nameConst
}
|
||||||
|
|
||||||
|
// declareNameVars declares the concatenated names string representing all the values in the runs.
// Unlike createIndexAndNameDecl, no index array is produced; the map built
// by buildMap slices the blob directly.
func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) {
	g.Printf("const _%s_name%s = \"", typeName, suffix)
	for _, run := range runs {
		for i := range run {
			g.Printf("%s", run[i].name)
		}
	}
	g.Printf("\"\n")
}
|
||||||
|
|
||||||
|
// buildOneRun generates the variables and String method for a single run of contiguous values.
func (g *Generator) buildOneRun(runs [][]Value, typeName string) {
	values := runs[0]
	g.Printf("\n")
	g.declareIndexAndNameVar(values, typeName)
	// The generated code is simple enough to write as a Printf format.
	lessThanZero := ""
	if values[0].signed {
		lessThanZero = "i < 0 || "
	}
	if values[0].value == 0 { // Signed or unsigned, 0 is still 0.
		g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero)
	} else {
		// The run does not start at zero; the offset variant subtracts the
		// lowest value before indexing.
		g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero)
	}
}
|
||||||
|
|
||||||
|
// stringOneRun is the String method template for a single run starting at zero.
// Arguments to format are:
//	[1]: type name
//	[2]: size of index element (8 for uint8 etc.)
//	[3]: less than zero check (for signed types)
const stringOneRun = `func (i %[1]s) String() string {
	if %[3]si >= %[1]s(len(_%[1]s_index)-1) {
		return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]]
}
`
|
||||||
|
|
||||||
|
// stringOneRunWithOffset is the String method template for a single run that
// does not start at zero; the lowest value is subtracted before indexing.
// Arguments to format are:
//	[1]: type name
//	[2]: lowest defined value for type, as a string
//	[3]: size of index element (8 for uint8 etc.)
//	[4]: less than zero check (for signed types)
const stringOneRunWithOffset = `func (i %[1]s) String() string {
	i -= %[2]s
	if %[4]si >= %[1]s(len(_%[1]s_index)-1) {
		return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")"
	}
	return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]]
}
`
|
||||||
|
|
||||||
|
// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values.
// For this pattern, a single Printf format won't do.
func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) {
	g.Printf("\n")
	g.declareIndexAndNameVars(runs, typeName)
	g.Printf("func (i %s) String() string {\n", typeName)
	g.Printf("\tswitch {\n")
	for i, values := range runs {
		if len(values) == 1 {
			// Single-value run: match by equality, no index slice needed.
			g.Printf("\tcase i == %s:\n", &values[0])
			g.Printf("\t\treturn _%s_name_%d\n", typeName, i)
			continue
		}
		g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1])
		if values[0].value != 0 {
			// Rebase i so it indexes this run's name blob from zero.
			g.Printf("\t\ti -= %s\n", &values[0])
		}
		g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n",
			typeName, i, typeName, i, typeName, i)
	}
	g.Printf("\tdefault:\n")
	g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName)
	g.Printf("\t}\n")
	g.Printf("}\n")
}
|
||||||
|
|
||||||
|
// buildMap handles the case where the space is so sparse a map is a reasonable fallback.
// It's a rare situation but has simple code.
func (g *Generator) buildMap(runs [][]Value, typeName string) {
	g.Printf("\n")
	g.declareNameVars(runs, typeName, "")
	g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName)
	n := 0 // running offset into the concatenated name blob
	for _, values := range runs {
		for _, value := range values {
			g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name))
			n += len(value.name)
		}
	}
	g.Printf("}\n\n")
	g.Printf(stringMap, typeName)
}
|
||||||
|
|
||||||
|
// stringMap is the String method template for the map-based fallback.
// Argument to format is the type name.
const stringMap = `func (i %[1]s) String() string {
	if str, ok := _%[1]s_map[i]; ok {
		return str
	}
	return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")"
}
`
|
|
@ -0,0 +1,213 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package cover provides support for parsing coverage profiles
|
||||||
|
// generated by "go test -coverprofile=cover.out".
|
||||||
|
package cover // import "golang.org/x/tools/cover"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Profile represents the profiling data for a specific file.
type Profile struct {
	FileName string         // file name as recorded in the profile (e.g. "encoding/base64/base64.go")
	Mode     string         // coverage mode: "set", "count", or "atomic"
	Blocks   []ProfileBlock // profiled blocks; sorted and merged by ParseProfiles
}
|
||||||
|
|
||||||
|
// ProfileBlock represents a single block of profiling data.
type ProfileBlock struct {
	StartLine, StartCol int // start position of the block in the source file
	EndLine, EndCol     int // end position of the block in the source file
	NumStmt, Count      int // number of statements in the block, and execution count
}
|
||||||
|
|
||||||
|
// byFileName sorts profiles lexically by file name.
type byFileName []*Profile

func (p byFileName) Len() int           { return len(p) }
func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
func (p byFileName) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
|
||||||
|
|
||||||
|
// ParseProfiles parses profile data in the specified file and returns a
// Profile for each source file described therein. Blocks for each file are
// sorted by start position and duplicate samples for the same block are
// merged; the returned slice is sorted by file name.
func ParseProfiles(fileName string) ([]*Profile, error) {
	pf, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer pf.Close()

	files := make(map[string]*Profile)
	buf := bufio.NewReader(pf)
	// First line is "mode: foo", where foo is "set", "count", or "atomic".
	// Rest of file is in the format
	//	encoding/base64/base64.go:34.44,37.40 3 1
	// where the fields are: name.go:line.column,line.column numberOfStatements count
	s := bufio.NewScanner(buf)
	mode := ""
	for s.Scan() {
		line := s.Text()
		if mode == "" {
			const p = "mode: "
			if !strings.HasPrefix(line, p) || line == p {
				return nil, fmt.Errorf("bad mode line: %v", line)
			}
			mode = line[len(p):]
			continue
		}
		m := lineRe.FindStringSubmatch(line)
		if m == nil {
			return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, lineRe)
		}
		fn := m[1]
		p := files[fn]
		if p == nil {
			// First block seen for this file: create its profile.
			p = &Profile{
				FileName: fn,
				Mode:     mode,
			}
			files[fn] = p
		}
		p.Blocks = append(p.Blocks, ProfileBlock{
			StartLine: toInt(m[2]),
			StartCol:  toInt(m[3]),
			EndLine:   toInt(m[4]),
			EndCol:    toInt(m[5]),
			NumStmt:   toInt(m[6]),
			Count:     toInt(m[7]),
		})
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	for _, p := range files {
		sort.Sort(blocksByStart(p.Blocks))
		// Merge samples from the same location. Every profile here has at
		// least one block, so starting j at 1 is safe.
		j := 1
		for i := 1; i < len(p.Blocks); i++ {
			b := p.Blocks[i]
			last := p.Blocks[j-1]
			if b.StartLine == last.StartLine &&
				b.StartCol == last.StartCol &&
				b.EndLine == last.EndLine &&
				b.EndCol == last.EndCol {
				if b.NumStmt != last.NumStmt {
					return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt)
				}
				// "set" mode records only covered/uncovered, so OR the
				// counts; counting modes accumulate.
				if mode == "set" {
					p.Blocks[j-1].Count |= b.Count
				} else {
					p.Blocks[j-1].Count += b.Count
				}
				continue
			}
			p.Blocks[j] = b
			j++
		}
		p.Blocks = p.Blocks[:j]
	}
	// Generate a sorted slice.
	profiles := make([]*Profile, 0, len(files))
	for _, profile := range files {
		profiles = append(profiles, profile)
	}
	sort.Sort(byFileName(profiles))
	return profiles, nil
}
|
||||||
|
|
||||||
|
type blocksByStart []ProfileBlock
|
||||||
|
|
||||||
|
func (b blocksByStart) Len() int { return len(b) }
|
||||||
|
func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||||
|
func (b blocksByStart) Less(i, j int) bool {
|
||||||
|
bi, bj := b[i], b[j]
|
||||||
|
return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
|
||||||
|
}
|
||||||
|
|
||||||
|
// lineRe matches a coverage profile block line of the form:
//	name.go:line.column,line.column numberOfStatements count
// The dots between line and column are escaped so that only a literal
// '.' separator is accepted (an unescaped '.' would match any byte,
// admitting malformed lines).
var lineRe = regexp.MustCompile(`^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$`)
|
||||||
|
|
||||||
|
// toInt converts s to an int, panicking if the conversion fails.
// Inputs have already been validated against lineRe, so a failure
// here indicates a bug rather than bad profile data.
func toInt(s string) int {
	n, err := strconv.Atoi(s)
	if err != nil {
		panic(err)
	}
	return n
}
|
||||||
|
|
||||||
|
// Boundary represents the position in a source file of the beginning or end of a
// block as reported by the coverage profile. In HTML mode, it will correspond to
// the opening or closing of a <span> tag and will be used to colorize the source
type Boundary struct {
	Offset int     // Location as a byte offset in the source file.
	Start  bool    // Is this the start of a block?
	Count  int     // Event count from the cover profile.
	Norm   float64 // Count normalized to [0..1].
}
|
||||||
|
|
||||||
|
// Boundaries returns a Profile as a set of Boundary objects within the provided src.
// The returned boundaries are sorted by offset, with ends ordered before
// starts at equal offsets.
func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
	// Find maximum count.
	max := 0
	for _, b := range p.Blocks {
		if b.Count > max {
			max = b.Count
		}
	}
	// Divisor for normalization.
	divisor := math.Log(float64(max))

	// boundary returns a Boundary, populating the Norm field with a normalized Count.
	boundary := func(offset int, start bool, count int) Boundary {
		b := Boundary{Offset: offset, Start: start, Count: count}
		if !start || count == 0 {
			return b
		}
		if max <= 1 {
			b.Norm = 0.8 // Profile is in "set" mode; we want a heat map. Use cov8 in the CSS.
		} else if count > 0 {
			b.Norm = math.Log(float64(count)) / divisor
		}
		return b
	}

	// Walk src and the (sorted) blocks in lockstep, emitting a Boundary
	// whenever the current byte offset coincides with a block edge.
	line, col := 1, 2 // TODO: Why is this 2?
	for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {
		b := p.Blocks[bi]
		if b.StartLine == line && b.StartCol == col {
			boundaries = append(boundaries, boundary(si, true, b.Count))
		}
		if b.EndLine == line && b.EndCol == col || line > b.EndLine {
			boundaries = append(boundaries, boundary(si, false, 0))
			bi++
			continue // Don't advance through src; maybe the next block starts here.
		}
		if src[si] == '\n' {
			line++
			col = 0
		}
		col++
		si++
	}
	sort.Sort(boundariesByPos(boundaries))
	return
}
|
||||||
|
|
||||||
|
type boundariesByPos []Boundary
|
||||||
|
|
||||||
|
func (b boundariesByPos) Len() int { return len(b) }
|
||||||
|
func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||||
|
func (b boundariesByPos) Less(i, j int) bool {
|
||||||
|
if b[i].Offset == b[j].Offset {
|
||||||
|
return !b[i].Start && b[j].Start
|
||||||
|
}
|
||||||
|
return b[i].Offset < b[j].Offset
|
||||||
|
}
|
|
@ -0,0 +1,109 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package gcexportdata provides functions for locating, reading, and
|
||||||
|
// writing export data files containing type information produced by the
|
||||||
|
// gc compiler. This package supports go1.7 export data format and all
|
||||||
|
// later versions.
|
||||||
|
//
|
||||||
|
// Although it might seem convenient for this package to live alongside
|
||||||
|
// go/types in the standard library, this would cause version skew
|
||||||
|
// problems for developer tools that use it, since they must be able to
|
||||||
|
// consume the outputs of the gc compiler both before and after a Go
|
||||||
|
// update such as from Go 1.7 to Go 1.8. Because this package lives in
|
||||||
|
// golang.org/x/tools, sites can update their version of this repo some
|
||||||
|
// time before the Go 1.8 release and rebuild and redeploy their
|
||||||
|
// developer tools, which will then be able to consume both Go 1.7 and
|
||||||
|
// Go 1.8 export data files, so they will work before and after the
|
||||||
|
// Go update. (See discussion at https://golang.org/issue/15651.)
|
||||||
|
//
|
||||||
|
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/internal/gcimporter"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Find returns the name of an object (.o) or archive (.a) file
// containing type information for the specified import path,
// using the workspace layout conventions of go/build.
// If no file was found, an empty filename is returned.
//
// A relative srcDir is interpreted relative to the current working directory.
//
// Find also returns the package's resolved (canonical) import path,
// reflecting the effects of srcDir and vendoring on importPath.
func Find(importPath, srcDir string) (filename, path string) {
	// The actual lookup is delegated to the internal gcimporter package.
	return gcimporter.FindPkg(importPath, srcDir)
}
|
||||||
|
|
||||||
|
// NewReader returns a reader for the export data section of an object
// (.o) or archive (.a) file read from r. The new reader may provide
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
	buf := bufio.NewReader(r)
	// Advance buf to the start of the export data; a non-nil error
	// means the export data section could not be located.
	_, err := gcimporter.FindExportData(buf)
	// If we ever switch to a zip-like archive format with the ToC
	// at the end, we can return the correct portion of export data,
	// but for now we must return the entire rest of the file.
	return buf, err
}
|
||||||
|
|
||||||
|
// Read reads export data from in, decodes it, and returns type
// information for the package.
// The package name is specified by path.
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references
// within the export data to other packages are consistent. The caller
// must ensure that imports[path] does not exist, or exists but is
// incomplete (see types.Package.Complete), and Read inserts the
// resulting package into this map entry.
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
	data, err := ioutil.ReadAll(in)
	if err != nil {
		return nil, fmt.Errorf("reading export data for %q: %v", path, err)
	}

	// Archives must be unwrapped with NewReader first; detect the
	// archive magic and give a helpful error rather than garbage.
	if bytes.HasPrefix(data, []byte("!<arch>")) {
		return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
	}

	// The App Engine Go runtime v1.6 uses the old export data format.
	// TODO(adonovan): delete once v1.7 has been around for a while.
	if bytes.HasPrefix(data, []byte("package ")) {
		return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
	}

	// The indexed export format starts with an 'i'; the older
	// binary export format starts with a 'c', 'd', or 'v'
	// (from "version"). Select appropriate importer.
	if len(data) > 0 && data[0] == 'i' {
		_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
		return pkg, err
	}

	_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
	return pkg, err
}
|
||||||
|
|
||||||
|
// Write writes encoded type information for the specified package to out.
|
||||||
|
// The FileSet provides file position information for named objects.
|
||||||
|
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
|
||||||
|
b, err := gcimporter.BExportData(fset, pkg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = out.Write(b)
|
||||||
|
return err
|
||||||
|
}
|
|
@ -0,0 +1,73 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package gcexportdata
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewImporter returns a new instance of the types.Importer interface
// that reads type information from export data files written by gc.
// The Importer also satisfies types.ImporterFrom.
//
// Export data files are located using "go build" workspace conventions
// and the build.Default context.
//
// Use this importer instead of go/importer.For("gc", ...) to avoid the
// version-skew problems described in the documentation of this package,
// or to control the FileSet or access the imports map populated during
// package loading.
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
	return importer{fset, imports}
}
|
||||||
|
|
||||||
|
// importer implements types.ImporterFrom by locating export data files
// with Find and decoding them with Read.
type importer struct {
	fset    *token.FileSet            // receives position information for decoded objects
	imports map[string]*types.Package // package cache, keyed by canonical import path
}
|
||||||
|
|
||||||
|
// Import implements types.Importer by delegating to ImportFrom with an
// empty srcDir (no vendoring context) and default mode.
func (imp importer) Import(importPath string) (*types.Package, error) {
	return imp.ImportFrom(importPath, "", 0)
}
|
||||||
|
|
||||||
|
// ImportFrom implements types.ImporterFrom. It returns the cached
// package for importPath if complete, otherwise locates its export
// data file (resolving vendoring relative to srcDir), decodes it,
// and caches the result in imp.imports.
func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
	filename, path := Find(importPath, srcDir)
	if filename == "" {
		if importPath == "unsafe" {
			// Even for unsafe, call Find first in case
			// the package was vendored.
			return types.Unsafe, nil
		}
		return nil, fmt.Errorf("can't find import: %s", importPath)
	}

	if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
		return pkg, nil // cache hit
	}

	// open file
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	// Close on all paths; the named result err lets the deferred
	// function wrap any error with the file name.
	defer func() {
		f.Close()
		if err != nil {
			// add file name to error
			err = fmt.Errorf("reading export data: %s: %v", filename, err)
		}
	}()

	r, err := NewReader(f)
	if err != nil {
		return nil, err
	}

	return Read(r, imp.fset, imp.imports, path)
}
|
|
@ -0,0 +1,99 @@
|
||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// The gcexportdata command is a diagnostic tool that displays the
|
||||||
|
// contents of gc export data files.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/gcexportdata"
|
||||||
|
"golang.org/x/tools/go/types/typeutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// packageFlag selects an indirectly mentioned package to print instead
// of the export data file's primary package.
var packageFlag = flag.String("package", "", "alternative package to print")
|
||||||
|
|
||||||
|
// main reads the gc export data file named on the command line and
// prints its package-level declarations; for type names it also prints
// each method. The -package flag selects an indirectly mentioned
// package instead of the primary one.
func main() {
	log.SetPrefix("gcexportdata: ")
	log.SetFlags(0)
	flag.Usage = func() {
		fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a")
	}
	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(2)
	}
	filename := flag.Args()[0]

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}

	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatalf("%s: %s", filename, err)
	}

	// Decode the package.
	const primary = "<primary>"
	imports := make(map[string]*types.Package)
	fset := token.NewFileSet()
	pkg, err := gcexportdata.Read(r, fset, imports, primary)
	if err != nil {
		log.Fatalf("%s: %s", filename, err)
	}

	// Optionally select an indirectly mentioned package.
	if *packageFlag != "" {
		pkg = imports[*packageFlag]
		if pkg == nil {
			fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n",
				filename, *packageFlag)
			for p := range imports {
				if p != primary {
					fmt.Fprintf(os.Stderr, "\t%s\n", p)
				}
			}
			os.Exit(1)
		}
	}

	// Print all package-level declarations, including non-exported ones.
	fmt.Printf("package %s\n", pkg.Name())
	for _, imp := range pkg.Imports() {
		fmt.Printf("import %q\n", imp.Path())
	}
	// qual suppresses the qualifier for names from the package
	// being printed.
	qual := func(p *types.Package) string {
		if pkg == p {
			return ""
		}
		return p.Name()
	}
	scope := pkg.Scope()
	for _, name := range scope.Names() {
		obj := scope.Lookup(name)
		fmt.Printf("%s: %s\n",
			fset.Position(obj.Pos()),
			types.ObjectString(obj, qual))

		// For types, print each method.
		if _, ok := obj.(*types.TypeName); ok {
			for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) {
				fmt.Printf("%s: %s\n",
					fset.Position(method.Obj().Pos()),
					types.SelectionString(method, qual))
			}
		}
	}
}
|
|
@ -0,0 +1,852 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Binary package export.
|
||||||
|
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
|
||||||
|
// see that file for specification of the format.
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/constant"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// If debugFormat is set, each integer and string value is preceded by a marker
// and position information in the encoding. This mechanism permits an importer
// to recognize immediately when it is out of sync. The importer recognizes this
// mode automatically (i.e., it can import export data produced with debugging
// support even if debugFormat is not set at the time of import). This mode will
// lead to massively larger export data (by a factor of 2 to 3) and should only
// be enabled during development and debugging.
//
// NOTE: This flag is the first flag to enable if importing dies because of
// (suspected) format errors, and whenever a change is made to the format.
const debugFormat = false // default: false

// If trace is set, debugging output is printed to std out.
const trace = false // default: false

// Current export format version. Increase with each format change.
// Note: The latest binary (non-indexed) export format is at version 6.
// This exporter is still at level 4, but it doesn't matter since
// the binary importer can handle older versions just fine.
// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
// 4: type name objects support type aliases, uses aliasTag
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
// 2: removed unused bool in ODCL export (compiler only)
// 1: header format change (more regular), export package for _ struct fields
// 0: Go1.7 encoding
const exportVersion = 4

// trackAllTypes enables cycle tracking for all types, not just named
// types. The existing compiler invariants assume that unnamed types
// that are not completely set up are not used, or else there are spurious
// errors.
// If disabled, only named types are tracked, possibly leading to slightly
// less efficient encoding in rare cases. It also prevents the export of
// some corner-case type declarations (but those are not handled correctly
// with the textual export format either).
// TODO(gri) enable and remove once issues caused by it are fixed
const trackAllTypes = false
|
||||||
|
|
||||||
|
// exporter holds the state of a single binary export-data encoding run;
// output is accumulated in out.
type exporter struct {
	fset *token.FileSet
	out  bytes.Buffer

	// object -> index maps, indexed in order of serialization
	strIndex map[string]int
	pkgIndex map[*types.Package]int
	typIndex map[types.Type]int

	// position encoding
	posInfoFormat bool
	prevFile      string // previous position's file, for delta encoding
	prevLine      int    // previous position's line, for delta encoding

	// debugging support
	written int // bytes written
	indent  int // for trace
}
|
||||||
|
|
||||||
|
// internalError represents an error generated inside this package.
// Its message is prefixed with "gcimporter: " when rendered.
type internalError string

func (e internalError) Error() string { return "gcimporter: " + string(e) }

// internalErrorf builds an internalError in the manner of fmt.Errorf.
func internalErrorf(format string, args ...interface{}) error {
	msg := fmt.Sprintf(format, args...)
	return internalError(msg)
}
|
||||||
|
|
||||||
|
// BExportData returns binary export data for pkg.
// If no file set is provided, position info will be missing.
func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
	// Convert internalError panics raised during encoding into a
	// returned error; any other panic is a real bug and is re-raised.
	defer func() {
		if e := recover(); e != nil {
			if ierr, ok := e.(internalError); ok {
				err = ierr
				return
			}
			// Not an internal error; panic again.
			panic(e)
		}
	}()

	p := exporter{
		fset:          fset,
		strIndex:      map[string]int{"": 0}, // empty string is mapped to 0
		pkgIndex:      make(map[*types.Package]int),
		typIndex:      make(map[types.Type]int),
		posInfoFormat: true, // TODO(gri) might become a flag, eventually
	}

	// write version info
	// The version string must start with "version %d" where %d is the version
	// number. Additional debugging information may follow after a blank; that
	// text is ignored by the importer.
	p.rawStringln(fmt.Sprintf("version %d", exportVersion))
	var debug string
	if debugFormat {
		debug = "debug"
	}
	p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
	p.bool(trackAllTypes)
	p.bool(p.posInfoFormat)

	// --- generic export data ---

	// populate type map with predeclared "known" types
	for index, typ := range predeclared() {
		p.typIndex[typ] = index
	}
	if len(p.typIndex) != len(predeclared()) {
		return nil, internalError("duplicate entries in type map?")
	}

	// write package data
	p.pkg(pkg, true)
	if trace {
		p.tracef("\n")
	}

	// write objects (exported package-level names only)
	objcount := 0
	scope := pkg.Scope()
	for _, name := range scope.Names() {
		if !ast.IsExported(name) {
			continue
		}
		if trace {
			p.tracef("\n")
		}
		p.obj(scope.Lookup(name))
		objcount++
	}

	// indicate end of list
	if trace {
		p.tracef("\n")
	}
	p.tag(endTag)

	// for self-verification only (redundant)
	p.int(objcount)

	if trace {
		p.tracef("\n")
	}

	// --- end of export data ---

	return p.out.Bytes(), nil
}
|
||||||
|
|
||||||
|
// pkg emits a reference to pkg: a back-reference index if it was
// already serialized, otherwise the package tag followed by its name
// and (unless emptypath) its path.
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
	if pkg == nil {
		panic(internalError("unexpected nil pkg"))
	}

	// if we saw the package before, write its index (>= 0)
	if i, ok := p.pkgIndex[pkg]; ok {
		p.index('P', i)
		return
	}

	// otherwise, remember the package, write the package tag (< 0) and package data
	if trace {
		p.tracef("P%d = { ", len(p.pkgIndex))
		defer p.tracef("} ")
	}
	p.pkgIndex[pkg] = len(p.pkgIndex)

	p.tag(packageTag)
	p.string(pkg.Name())
	if emptypath {
		p.string("")
	} else {
		p.string(pkg.Path())
	}
}
|
||||||
|
|
||||||
|
// obj emits the export data for a single package-level object:
// a tag identifying its kind followed by kind-specific data.
func (p *exporter) obj(obj types.Object) {
	switch obj := obj.(type) {
	case *types.Const:
		p.tag(constTag)
		p.pos(obj)
		p.qualifiedName(obj)
		p.typ(obj.Type())
		p.value(obj.Val())

	case *types.TypeName:
		if obj.IsAlias() {
			p.tag(aliasTag)
			p.pos(obj)
			p.qualifiedName(obj)
		} else {
			// Non-alias type names carry no separate name here; the
			// name is encoded with the named type itself.
			p.tag(typeTag)
		}
		p.typ(obj.Type())

	case *types.Var:
		p.tag(varTag)
		p.pos(obj)
		p.qualifiedName(obj)
		p.typ(obj.Type())

	case *types.Func:
		p.tag(funcTag)
		p.pos(obj)
		p.qualifiedName(obj)
		sig := obj.Type().(*types.Signature)
		p.paramList(sig.Params(), sig.Variadic())
		p.paramList(sig.Results(), false)

	default:
		panic(internalErrorf("unexpected object %v (%T)", obj, obj))
	}
}
|
||||||
|
|
||||||
|
// pos emits delta-encoded position information for obj (line delta
// within the same file, or a new file name plus absolute line).
// It is a no-op unless posInfoFormat is enabled.
func (p *exporter) pos(obj types.Object) {
	if !p.posInfoFormat {
		return
	}

	file, line := p.fileLine(obj)
	if file == p.prevFile {
		// common case: write line delta
		// delta == 0 means different file or no line change
		delta := line - p.prevLine
		p.int(delta)
		if delta == 0 {
			p.int(-1) // -1 means no file change
		}
	} else {
		// different file
		p.int(0)
		// Encode filename as length of common prefix with previous
		// filename, followed by (possibly empty) suffix. Filenames
		// frequently share path prefixes, so this can save a lot
		// of space and make export data size less dependent on file
		// path length. The suffix is unlikely to be empty because
		// file names tend to end in ".go".
		n := commonPrefixLen(p.prevFile, file)
		p.int(n)           // n >= 0
		p.string(file[n:]) // write suffix only
		p.prevFile = file
		p.int(line)
	}
	p.prevLine = line
}
|
||||||
|
|
||||||
|
func (p *exporter) fileLine(obj types.Object) (file string, line int) {
|
||||||
|
if p.fset != nil {
|
||||||
|
pos := p.fset.Position(obj.Pos())
|
||||||
|
file = pos.Filename
|
||||||
|
line = pos.Line
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// commonPrefixLen returns the length (in bytes) of the longest common
// prefix of a and b.
func commonPrefixLen(a, b string) int {
	limit := len(a)
	if len(b) < limit {
		limit = len(b)
	}
	n := 0
	for n < limit && a[n] == b[n] {
		n++
	}
	return n
}
|
||||||
|
|
||||||
|
// qualifiedName emits obj's name followed by its package reference.
func (p *exporter) qualifiedName(obj types.Object) {
	p.string(obj.Name())
	p.pkg(obj.Pkg(), false)
}
|
||||||
|
|
||||||
|
// typ emits type t: a back-reference index if t was serialized before,
// otherwise a kind tag followed by the type's structure, recursively.
func (p *exporter) typ(t types.Type) {
	if t == nil {
		panic(internalError("nil type"))
	}

	// Possible optimization: Anonymous pointer types *T where
	// T is a named type are common. We could canonicalize all
	// such types *T to a single type PT = *T. This would lead
	// to at most one *T entry in typIndex, and all future *T's
	// would be encoded as the respective index directly. Would
	// save 1 byte (pointerTag) per *T and reduce the typIndex
	// size (at the cost of a canonicalization map). We can do
	// this later, without encoding format change.

	// if we saw the type before, write its index (>= 0)
	if i, ok := p.typIndex[t]; ok {
		p.index('T', i)
		return
	}

	// otherwise, remember the type, write the type tag (< 0) and type data
	if trackAllTypes {
		if trace {
			p.tracef("T%d = {>\n", len(p.typIndex))
			defer p.tracef("<\n} ")
		}
		p.typIndex[t] = len(p.typIndex)
	}

	switch t := t.(type) {
	case *types.Named:
		if !trackAllTypes {
			// if we don't track all types, track named types now
			p.typIndex[t] = len(p.typIndex)
		}

		p.tag(namedTag)
		p.pos(t.Obj())
		p.qualifiedName(t.Obj())
		p.typ(t.Underlying())
		if !types.IsInterface(t) {
			p.assocMethods(t)
		}

	case *types.Array:
		p.tag(arrayTag)
		p.int64(t.Len())
		p.typ(t.Elem())

	case *types.Slice:
		p.tag(sliceTag)
		p.typ(t.Elem())

	case *dddSlice:
		p.tag(dddTag)
		p.typ(t.elem)

	case *types.Struct:
		p.tag(structTag)
		p.fieldList(t)

	case *types.Pointer:
		p.tag(pointerTag)
		p.typ(t.Elem())

	case *types.Signature:
		p.tag(signatureTag)
		p.paramList(t.Params(), t.Variadic())
		p.paramList(t.Results(), false)

	case *types.Interface:
		p.tag(interfaceTag)
		p.iface(t)

	case *types.Map:
		p.tag(mapTag)
		p.typ(t.Key())
		p.typ(t.Elem())

	case *types.Chan:
		p.tag(chanTag)
		p.int(int(3 - t.Dir())) // hack
		p.typ(t.Elem())

	default:
		panic(internalErrorf("unexpected type %T: %s", t, t))
	}
}
|
||||||
|
|
||||||
|
// assocMethods emits the methods associated with named, sorted by name
// so the export data is deterministic.
func (p *exporter) assocMethods(named *types.Named) {
	// Sort methods (for determinism).
	var methods []*types.Func
	for i := 0; i < named.NumMethods(); i++ {
		methods = append(methods, named.Method(i))
	}
	sort.Sort(methodsByName(methods))

	p.int(len(methods))

	if trace && methods != nil {
		p.tracef("associated methods {>\n")
	}

	for i, m := range methods {
		if trace && i > 0 {
			p.tracef("\n")
		}

		p.pos(m)
		name := m.Name()
		p.string(name)
		// Unexported method names need their package for qualification.
		if !exported(name) {
			p.pkg(m.Pkg(), false)
		}

		sig := m.Type().(*types.Signature)
		p.paramList(types.NewTuple(sig.Recv()), false)
		p.paramList(sig.Params(), sig.Variadic())
		p.paramList(sig.Results(), false)
		p.int(0) // dummy value for go:nointerface pragma - ignored by importer
	}

	if trace && methods != nil {
		p.tracef("<\n} ")
	}
}
|
||||||
|
|
||||||
|
// methodsByName orders functions lexicographically by name, for
// deterministic export data (sort.Interface).
type methodsByName []*types.Func

func (x methodsByName) Len() int           { return len(x) }
func (x methodsByName) Swap(i, j int)      { x[j], x[i] = x[i], x[j] }
func (x methodsByName) Less(i, j int) bool { return x[j].Name() > x[i].Name() }
|
||||||
|
|
||||||
|
// fieldList emits the field count of struct type t followed by each
// field and its struct tag.
func (p *exporter) fieldList(t *types.Struct) {
	if trace && t.NumFields() > 0 {
		p.tracef("fields {>\n")
		defer p.tracef("<\n} ")
	}

	p.int(t.NumFields())
	for i := 0; i < t.NumFields(); i++ {
		if trace && i > 0 {
			p.tracef("\n")
		}
		p.field(t.Field(i))
		p.string(t.Tag(i))
	}
}
|
||||||
|
|
||||||
|
// field emits a single struct field: its position, name, and type.
// f must be a struct field (Var with IsField() == true).
func (p *exporter) field(f *types.Var) {
	if !f.IsField() {
		panic(internalError("field expected"))
	}

	p.pos(f)
	p.fieldName(f)
	p.typ(f.Type())
}
|
||||||
|
|
||||||
|
// iface emits interface type t: an embedded-interface count (always 0
// in this format — see TODO) followed by the method count and methods.
func (p *exporter) iface(t *types.Interface) {
	// TODO(gri): enable importer to load embedded interfaces,
	// then emit Embeddeds and ExplicitMethods separately here.
	p.int(0)

	n := t.NumMethods()
	if trace && n > 0 {
		p.tracef("methods {>\n")
		defer p.tracef("<\n} ")
	}
	p.int(n)
	for i := 0; i < n; i++ {
		if trace && i > 0 {
			p.tracef("\n")
		}
		p.method(t.Method(i))
	}
}
|
||||||
|
|
||||||
|
// method writes an interface method: position, name (qualified by
// package for unexported names), and its signature without receiver.
// It panics if m has no receiver.
func (p *exporter) method(m *types.Func) {
	sig := m.Type().(*types.Signature)
	if sig.Recv() == nil {
		panic(internalError("method expected"))
	}

	p.pos(m)
	p.string(m.Name())
	if m.Name() != "_" && !ast.IsExported(m.Name()) {
		// Unexported method names need their package for uniqueness.
		p.pkg(m.Pkg(), false)
	}

	// interface method; no need to encode receiver.
	p.paramList(sig.Params(), sig.Variadic())
	p.paramList(sig.Results(), false)
}
|
||||||
|
|
||||||
|
// fieldName writes the name of struct field f, using the compact
// encodings for anonymous (embedded) fields described inline.
func (p *exporter) fieldName(f *types.Var) {
	name := f.Name()

	if f.Anonymous() {
		// anonymous field - we distinguish between 3 cases:
		// 1) field name matches base type name and is exported
		// 2) field name matches base type name and is not exported
		// 3) field name doesn't match base type name (alias name)
		bname := basetypeName(f.Type())
		if name == bname {
			if ast.IsExported(name) {
				name = "" // 1) we don't need to know the field name or package
			} else {
				name = "?" // 2) use unexported name "?" to force package export
			}
		} else {
			// 3) indicate alias and export name as is
			// (this requires an extra "@" but this is a rare case)
			p.string("@")
		}
	}

	p.string(name)
	if name != "" && !ast.IsExported(name) {
		// Unexported field names are qualified by their package.
		p.pkg(f.Pkg(), false)
	}
}
|
||||||
|
|
||||||
|
func basetypeName(typ types.Type) string {
|
||||||
|
switch typ := deref(typ).(type) {
|
||||||
|
case *types.Basic:
|
||||||
|
return typ.Name()
|
||||||
|
case *types.Named:
|
||||||
|
return typ.Obj().Name()
|
||||||
|
default:
|
||||||
|
return "" // unnamed type
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// paramList writes the parameter tuple params. A negative length
// signals unnamed parameters; for a variadic signature the final
// parameter's slice type is rewritten as a dddSlice.
func (p *exporter) paramList(params *types.Tuple, variadic bool) {
	// use negative length to indicate unnamed parameters
	// (look at the first parameter only since either all
	// names are present or all are absent)
	n := params.Len()
	if n > 0 && params.At(0).Name() == "" {
		n = -n
	}
	p.int(n)
	for i := 0; i < params.Len(); i++ {
		q := params.At(i)
		t := q.Type()
		if variadic && i == params.Len()-1 {
			// Encode the ... element type rather than the slice type.
			t = &dddSlice{t.(*types.Slice).Elem()}
		}
		p.typ(t)
		if n > 0 {
			name := q.Name()
			p.string(name)
			if name != "_" {
				p.pkg(q.Pkg(), false)
			}
		}
		p.string("") // no compiler-specific info
	}
}
|
||||||
|
|
||||||
|
// value writes the constant x using a kind-specific tagged encoding.
func (p *exporter) value(x constant.Value) {
	if trace {
		p.tracef("= ")
	}

	switch x.Kind() {
	case constant.Bool:
		tag := falseTag
		if constant.BoolVal(x) {
			tag = trueTag
		}
		p.tag(tag)

	case constant.Int:
		if v, exact := constant.Int64Val(x); exact {
			// common case: x fits into an int64 - use compact encoding
			p.tag(int64Tag)
			p.int64(v)
			return
		}
		// uncommon case: large x - use float encoding
		// (powers of 2 will be encoded efficiently with exponent)
		p.tag(floatTag)
		p.float(constant.ToFloat(x))

	case constant.Float:
		p.tag(floatTag)
		p.float(x)

	case constant.Complex:
		// A complex value is written as its real and imaginary parts.
		p.tag(complexTag)
		p.float(constant.Real(x))
		p.float(constant.Imag(x))

	case constant.String:
		p.tag(stringTag)
		p.string(constant.StringVal(x))

	case constant.Unknown:
		// package contains type errors
		p.tag(unknownTag)

	default:
		panic(internalErrorf("unexpected value %v (%T)", x, x))
	}
}
|
||||||
|
|
||||||
|
// float writes the floating-point constant x as sign, exponent, and
// big-endian mantissa bytes; zero is encoded as a single 0.
// It panics if x is not of kind constant.Float.
func (p *exporter) float(x constant.Value) {
	if x.Kind() != constant.Float {
		panic(internalErrorf("unexpected constant %v, want float", x))
	}
	// extract sign (there is no -0)
	sign := constant.Sign(x)
	if sign == 0 {
		// x == 0
		p.int(0)
		return
	}
	// x != 0

	var f big.Float
	if v, exact := constant.Float64Val(x); exact {
		// float64
		f.SetFloat64(v)
	} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
		// TODO(gri): add big.Rat accessor to constant.Value.
		r := valueToRat(num)
		f.SetRat(r.Quo(r, valueToRat(denom)))
	} else {
		// Value too large to represent as a fraction => inaccessible.
		// TODO(gri): add big.Float accessor to constant.Value.
		f.SetFloat64(math.MaxFloat64) // FIXME
	}

	// extract exponent such that 0.5 <= m < 1.0
	var m big.Float
	exp := f.MantExp(&m)

	// extract mantissa as *big.Int
	// - set exponent large enough so mant satisfies mant.IsInt()
	// - get *big.Int from mant
	m.SetMantExp(&m, int(m.MinPrec()))
	mant, acc := m.Int(nil)
	if acc != big.Exact {
		panic(internalError("internal error"))
	}

	p.int(sign)
	p.int(exp)
	p.string(string(mant.Bytes()))
}
|
||||||
|
|
||||||
|
func valueToRat(x constant.Value) *big.Rat {
|
||||||
|
// Convert little-endian to big-endian.
|
||||||
|
// I can't believe this is necessary.
|
||||||
|
bytes := constant.Bytes(x)
|
||||||
|
for i := 0; i < len(bytes)/2; i++ {
|
||||||
|
bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
|
||||||
|
}
|
||||||
|
return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *exporter) bool(b bool) bool {
|
||||||
|
if trace {
|
||||||
|
p.tracef("[")
|
||||||
|
defer p.tracef("= %v] ", b)
|
||||||
|
}
|
||||||
|
|
||||||
|
x := 0
|
||||||
|
if b {
|
||||||
|
x = 1
|
||||||
|
}
|
||||||
|
p.int(x)
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// Low-level encoders
|
||||||
|
|
||||||
|
// index writes a non-negative index value; marker identifies the kind
// of index in trace output only. It panics on negative indices, which
// would collide with the negative-tag space.
func (p *exporter) index(marker byte, index int) {
	if index < 0 {
		panic(internalError("invalid index < 0"))
	}
	if debugFormat {
		p.marker('t')
	}
	if trace {
		p.tracef("%c%d ", marker, index)
	}
	p.rawInt64(int64(index))
}
|
||||||
|
|
||||||
|
// tag writes a tag value, which must be negative; non-negative values
// are reserved for indices and cause a panic.
func (p *exporter) tag(tag int) {
	if tag >= 0 {
		panic(internalError("invalid tag >= 0"))
	}
	if debugFormat {
		p.marker('t')
	}
	if trace {
		p.tracef("%s ", tagString[-tag])
	}
	p.rawInt64(int64(tag))
}
|
||||||
|
|
||||||
|
// int writes x using the int64 encoding.
func (p *exporter) int(x int) {
	p.int64(int64(x))
}
|
||||||
|
|
||||||
|
// int64 writes x as a varint, preceded by an 'i' marker in debug format.
func (p *exporter) int64(x int64) {
	if debugFormat {
		p.marker('i')
	}
	if trace {
		p.tracef("%d ", x)
	}
	p.rawInt64(x)
}
|
||||||
|
|
||||||
|
// string writes s using an interning scheme: a previously seen string
// is written as its non-negative index; a new string is written as its
// negative length followed by its bytes, and remembered for next time.
func (p *exporter) string(s string) {
	if debugFormat {
		p.marker('s')
	}
	if trace {
		p.tracef("%q ", s)
	}
	// if we saw the string before, write its index (>= 0)
	// (the empty string is mapped to 0)
	if i, ok := p.strIndex[s]; ok {
		p.rawInt64(int64(i))
		return
	}
	// otherwise, remember string and write its negative length and bytes
	p.strIndex[s] = len(p.strIndex)
	p.rawInt64(-int64(len(s)))
	for i := 0; i < len(s); i++ {
		p.rawByte(s[i])
	}
}
|
||||||
|
|
||||||
|
// marker emits a marker byte and position information which makes
// it easy for a reader to detect if it is "out of sync". Used for
// debugFormat format only. m identifies the kind of value that
// follows (e.g. 't', 'i', 's').
func (p *exporter) marker(m byte) {
	p.rawByte(m)
	// Enable this for help tracking down the location
	// of an incorrect marker when running in debugFormat.
	if false && trace {
		p.tracef("#%d ", p.written)
	}
	// The current byte count lets the reader verify its own position.
	p.rawInt64(int64(p.written))
}
|
||||||
|
|
||||||
|
// rawInt64 writes x as an unescaped varint.
// rawInt64 should only be used by low-level encoders.
func (p *exporter) rawInt64(x int64) {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutVarint(tmp[:], x)
	for i := 0; i < n; i++ {
		p.rawByte(tmp[i])
	}
}
|
||||||
|
|
||||||
|
// rawStringln should only be used to emit the initial version string.
|
||||||
|
func (p *exporter) rawStringln(s string) {
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
p.rawByte(s[i])
|
||||||
|
}
|
||||||
|
p.rawByte('\n')
|
||||||
|
}
|
||||||
|
|
||||||
|
// rawByte is the bottleneck interface to write to p.out.
// rawByte escapes b as follows (any encoding does that
// hides '$'):
//
//	'$'  => '|' 'S'
//	'|'  => '|' '|'
//
// Necessary so other tools can find the end of the
// export data by searching for "$$".
// rawByte should only be used by low-level encoders.
func (p *exporter) rawByte(b byte) {
	switch b {
	case '$':
		// write '$' as '|' 'S'
		b = 'S'
		fallthrough
	case '|':
		// write '|' as '|' '|'
		p.out.WriteByte('|')
		p.written++
	}
	p.out.WriteByte(b)
	// p.written counts every byte emitted, including escape bytes.
	p.written++
}
|
||||||
|
|
||||||
|
// tracef is like fmt.Printf but it rewrites the format string
// to take care of indentation: '>' increases and '<' decreases the
// indent level, and each newline is followed by the current indent.
func (p *exporter) tracef(format string, args ...interface{}) {
	if strings.ContainsAny(format, "<>\n") {
		var buf bytes.Buffer
		for i := 0; i < len(format); i++ {
			// no need to deal with runes
			ch := format[i]
			switch ch {
			case '>':
				p.indent++
				continue
			case '<':
				p.indent--
				continue
			}
			buf.WriteByte(ch)
			if ch == '\n' {
				for j := p.indent; j > 0; j-- {
					buf.WriteString(". ")
				}
			}
		}
		format = buf.String()
	}
	fmt.Printf(format, args...)
}
|
||||||
|
|
||||||
|
// Debugging support.
// (tagString is only used when tracing is enabled)
// tagString maps negated tag values to human-readable names.
var tagString = [...]string{
	// Packages
	-packageTag: "package",

	// Types
	-namedTag:     "named type",
	-arrayTag:     "array",
	-sliceTag:     "slice",
	-dddTag:       "ddd",
	-structTag:    "struct",
	-pointerTag:   "pointer",
	-signatureTag: "signature",
	-interfaceTag: "interface",
	-mapTag:       "map",
	-chanTag:      "chan",

	// Values
	-falseTag:    "false",
	-trueTag:     "true",
	-int64Tag:    "int64",
	-floatTag:    "float",
	-fractionTag: "fraction",
	-complexTag:  "complex",
	-stringTag:   "string",
	-unknownTag:  "unknown",

	// Type aliases
	-aliasTag: "alias",
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,93 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
|
||||||
|
|
||||||
|
// This file implements FindExportData.
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// readGopackHeader parses one ar(1) archive member header from r and
// returns the member name and size in bytes.
func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
	// See $GOROOT/include/ar.h.
	hdr := make([]byte, 16+12+6+6+8+10+2)
	_, err = io.ReadFull(r, hdr)
	if err != nil {
		return
	}
	// leave for debugging
	if false {
		fmt.Printf("header: %s", hdr)
	}
	// The decimal size field is 10 bytes wide at a fixed offset.
	s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
	size, err = strconv.Atoi(s)
	if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
		err = fmt.Errorf("invalid archive header")
		return
	}
	name = strings.TrimSpace(string(hdr[:16]))
	return
}
|
||||||
|
|
||||||
|
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying GC-created object/archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function. The hdr result
// is the string before the export data, either "$$" or "$$B".
//
func FindExportData(r *bufio.Reader) (hdr string, err error) {
	// Read first line to make sure this is an object file.
	line, err := r.ReadSlice('\n')
	if err != nil {
		err = fmt.Errorf("can't find export data (%v)", err)
		return
	}

	if string(line) == "!<arch>\n" {
		// Archive file. Scan to __.PKGDEF.
		var name string
		if name, _, err = readGopackHeader(r); err != nil {
			return
		}

		// First entry should be __.PKGDEF.
		if name != "__.PKGDEF" {
			err = fmt.Errorf("go archive is missing __.PKGDEF")
			return
		}

		// Read first line of __.PKGDEF data, so that line
		// is once again the first line of the input.
		if line, err = r.ReadSlice('\n'); err != nil {
			err = fmt.Errorf("can't find export data (%v)", err)
			return
		}
	}

	// Now at __.PKGDEF in archive or still at beginning of file.
	// Either way, line should begin with "go object ".
	if !strings.HasPrefix(string(line), "go object ") {
		err = fmt.Errorf("not a Go object file")
		return
	}

	// Skip over object header to export data.
	// Begins after first line starting with $$.
	for line[0] != '$' {
		if line, err = r.ReadSlice('\n'); err != nil {
			err = fmt.Errorf("can't find export data (%v)", err)
			return
		}
	}
	hdr = string(line)

	return
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,723 @@
|
||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Indexed binary package export.
|
||||||
|
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
|
||||||
|
// see that file for specification of the format.
|
||||||
|
|
||||||
|
// +build go1.11
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"go/ast"
|
||||||
|
"go/constant"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Current indexed export format version. Increase with each format change.
|
||||||
|
// 0: Go1.11 encoding
|
||||||
|
const iexportVersion = 0
|
||||||
|
|
||||||
|
// IExportData returns the binary export data for pkg.
// If no file set is provided, position info will be missing.
func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
	// internalError panics raised anywhere below are converted back
	// into ordinary errors; anything else is re-raised.
	defer func() {
		if e := recover(); e != nil {
			if ierr, ok := e.(internalError); ok {
				err = ierr
				return
			}
			// Not an internal error; panic again.
			panic(e)
		}
	}()

	p := iexporter{
		out:         bytes.NewBuffer(nil),
		fset:        fset,
		allPkgs:     map[*types.Package]bool{},
		stringIndex: map[string]uint64{},
		declIndex:   map[types.Object]uint64{},
		typIndex:    map[types.Type]uint64{},
	}

	// Reserve the first predeclReserved type offsets for predeclared types.
	for i, pt := range predeclared() {
		p.typIndex[pt] = uint64(i)
	}
	if len(p.typIndex) > predeclReserved {
		panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
	}

	// Initialize work queue with exported declarations.
	scope := pkg.Scope()
	for _, name := range scope.Names() {
		if ast.IsExported(name) {
			p.pushDecl(scope.Lookup(name))
		}
	}

	// Loop until no more work.
	for !p.declTodo.empty() {
		p.doDecl(p.declTodo.popHead())
	}

	// Append indices to data0 section.
	dataLen := uint64(p.data0.Len())
	w := p.newWriter()
	w.writeIndex(p.declIndex, pkg)
	w.flush()

	// Assemble header.
	var hdr intWriter
	hdr.WriteByte('i')
	hdr.uint64(iexportVersion)
	hdr.uint64(uint64(p.strings.Len()))
	hdr.uint64(dataLen)

	// Flush output: header, then string section, then data section.
	io.Copy(p.out, &hdr)
	io.Copy(p.out, &p.strings)
	io.Copy(p.out, &p.data0)

	return p.out.Bytes(), nil
}
|
||||||
|
|
||||||
|
// writeIndex writes out an object index. mainIndex indicates whether
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description
// (i.e., name and height).
func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) {
	// Build a map from packages to objects from that package.
	pkgObjs := map[*types.Package][]types.Object{}

	// For the main index, make sure to include every package that
	// we reference, even if we're not exporting (or reexporting)
	// any symbols from it.
	pkgObjs[localpkg] = nil
	for pkg := range w.p.allPkgs {
		pkgObjs[pkg] = nil
	}

	for obj := range index {
		pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
	}

	// Sort objects within each package, and the packages themselves,
	// so the output is deterministic.
	var pkgs []*types.Package
	for pkg, objs := range pkgObjs {
		pkgs = append(pkgs, pkg)

		sort.Slice(objs, func(i, j int) bool {
			return objs[i].Name() < objs[j].Name()
		})
	}

	sort.Slice(pkgs, func(i, j int) bool {
		return pkgs[i].Path() < pkgs[j].Path()
	})

	w.uint64(uint64(len(pkgs)))
	for _, pkg := range pkgs {
		w.string(pkg.Path())
		w.string(pkg.Name())
		w.uint64(uint64(0)) // package height is not needed for go/types

		objs := pkgObjs[pkg]
		w.uint64(uint64(len(objs)))
		for _, obj := range objs {
			w.string(obj.Name())
			w.uint64(index[obj])
		}
	}
}
|
||||||
|
|
||||||
|
// iexporter holds the global state of an indexed export in progress.
type iexporter struct {
	fset *token.FileSet
	out  *bytes.Buffer

	// allPkgs tracks all packages that have been referenced by
	// the export data, so we can ensure to include them in the
	// main index.
	allPkgs map[*types.Package]bool

	// declTodo queues objects that still need to be written out.
	declTodo objQueue

	// strings is the string section; stringIndex maps each interned
	// string to its offset there.
	strings     intWriter
	stringIndex map[string]uint64

	// data0 is the declaration/type data section; declIndex and
	// typIndex record each object's and type's offset within it.
	data0     intWriter
	declIndex map[types.Object]uint64
	typIndex  map[types.Type]uint64
}
|
||||||
|
|
||||||
|
// stringOff returns the offset of s within the string section.
|
||||||
|
// If not already present, it's added to the end.
|
||||||
|
func (p *iexporter) stringOff(s string) uint64 {
|
||||||
|
off, ok := p.stringIndex[s]
|
||||||
|
if !ok {
|
||||||
|
off = uint64(p.strings.Len())
|
||||||
|
p.stringIndex[s] = off
|
||||||
|
|
||||||
|
p.strings.uint64(uint64(len(s)))
|
||||||
|
p.strings.WriteString(s)
|
||||||
|
}
|
||||||
|
return off
|
||||||
|
}
|
||||||
|
|
||||||
|
// pushDecl adds n to the declaration work queue, if not already present.
|
||||||
|
func (p *iexporter) pushDecl(obj types.Object) {
|
||||||
|
// Package unsafe is known to the compiler and predeclared.
|
||||||
|
assert(obj.Pkg() != types.Unsafe)
|
||||||
|
|
||||||
|
if _, ok := p.declIndex[obj]; ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
p.declIndex[obj] = ^uint64(0) // mark n present in work queue
|
||||||
|
p.declTodo.pushTail(obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
// exportWriter handles writing out individual data section chunks.
type exportWriter struct {
	p *iexporter

	// data accumulates this chunk's bytes until flush.
	data    intWriter
	currPkg *types.Package
	// prevFile and prevLine support delta position encoding (see pos).
	prevFile string
	prevLine int64
}
|
||||||
|
|
||||||
|
// doDecl writes the declaration of obj into a fresh data chunk and
// records its offset in declIndex. The first byte of each chunk is a
// tag identifying the kind of declaration.
func (p *iexporter) doDecl(obj types.Object) {
	w := p.newWriter()
	w.setPkg(obj.Pkg(), false)

	switch obj := obj.(type) {
	case *types.Var:
		w.tag('V')
		w.pos(obj.Pos())
		w.typ(obj.Type(), obj.Pkg())

	case *types.Func:
		sig, _ := obj.Type().(*types.Signature)
		if sig.Recv() != nil {
			// Methods are written as part of their receiver type below.
			panic(internalErrorf("unexpected method: %v", sig))
		}
		w.tag('F')
		w.pos(obj.Pos())
		w.signature(sig)

	case *types.Const:
		w.tag('C')
		w.pos(obj.Pos())
		w.value(obj.Type(), obj.Val())

	case *types.TypeName:
		if obj.IsAlias() {
			w.tag('A')
			w.pos(obj.Pos())
			w.typ(obj.Type(), obj.Pkg())
			break
		}

		// Defined type.
		w.tag('T')
		w.pos(obj.Pos())

		underlying := obj.Type().Underlying()
		w.typ(underlying, obj.Pkg())

		t := obj.Type()
		if types.IsInterface(t) {
			// Interface methods are carried by the type encoding itself.
			break
		}

		named, ok := t.(*types.Named)
		if !ok {
			panic(internalErrorf("%s is not a defined type", t))
		}

		// Emit the method set of the defined type.
		n := named.NumMethods()
		w.uint64(uint64(n))
		for i := 0; i < n; i++ {
			m := named.Method(i)
			w.pos(m.Pos())
			w.string(m.Name())
			sig, _ := m.Type().(*types.Signature)
			w.param(sig.Recv())
			w.signature(sig)
		}

	default:
		panic(internalErrorf("unexpected object: %v", obj))
	}

	p.declIndex[obj] = w.flush()
}
|
||||||
|
|
||||||
|
// tag writes the single-byte declaration tag (e.g. 'V', 'F', 'C').
func (w *exportWriter) tag(tag byte) {
	w.data.WriteByte(tag)
}
|
||||||
|
|
||||||
|
// pos writes the file/line position for pos, delta-encoding the line
// number when the file is unchanged from the previous call.
func (w *exportWriter) pos(pos token.Pos) {
	p := w.p.fset.Position(pos)
	file := p.Filename
	line := int64(p.Line)

	// When file is the same as the last position (common case),
	// we can save a few bytes by delta encoding just the line
	// number.
	//
	// Note: Because data objects may be read out of order (or not
	// at all), we can only apply delta encoding within a single
	// object. This is handled implicitly by tracking prevFile and
	// prevLine as fields of exportWriter.

	if file == w.prevFile {
		delta := line - w.prevLine
		w.int64(delta)
		if delta == deltaNewFile {
			// Disambiguate a genuine delta that collides with the
			// new-file sentinel value.
			w.int64(-1)
		}
	} else {
		w.int64(deltaNewFile)
		w.int64(line) // line >= 0
		w.string(file)
		w.prevFile = file
	}
	w.prevLine = line
}
|
||||||
|
|
||||||
|
// pkg records pkg in the set of referenced packages and writes its
// import path.
func (w *exportWriter) pkg(pkg *types.Package) {
	// Ensure any referenced packages are declared in the main index.
	w.p.allPkgs[pkg] = true

	w.string(pkg.Path())
}
|
||||||
|
|
||||||
|
// qualifiedIdent writes obj's name and package, scheduling obj itself
// for export if it hasn't been written yet.
func (w *exportWriter) qualifiedIdent(obj types.Object) {
	// Ensure any referenced declarations are written out too.
	w.p.pushDecl(obj)

	w.string(obj.Name())
	w.pkg(obj.Pkg())
}
|
||||||
|
|
||||||
|
// typ writes a reference to type t as its offset in the type index,
// encoding t first if necessary.
func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
	w.data.uint64(w.p.typOff(t, pkg))
}
|
||||||
|
|
||||||
|
func (p *iexporter) newWriter() *exportWriter {
|
||||||
|
return &exportWriter{p: p}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) flush() uint64 {
|
||||||
|
off := uint64(w.p.data0.Len())
|
||||||
|
io.Copy(&w.p.data0, &w.data)
|
||||||
|
return off
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
|
||||||
|
off, ok := p.typIndex[t]
|
||||||
|
if !ok {
|
||||||
|
w := p.newWriter()
|
||||||
|
w.doTyp(t, pkg)
|
||||||
|
off = predeclReserved + w.flush()
|
||||||
|
p.typIndex[t] = off
|
||||||
|
}
|
||||||
|
return off
|
||||||
|
}
|
||||||
|
|
||||||
|
// startType writes the type-kind tag that introduces a type encoding.
func (w *exportWriter) startType(k itag) {
	w.data.uint64(uint64(k))
}
|
||||||
|
|
||||||
|
// doTyp writes the encoding of type t, dispatching on its concrete
// kind. Each case begins with a startType tag identifying the kind.
func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
	switch t := t.(type) {
	case *types.Named:
		// Named types are written by reference to their declaration.
		w.startType(definedType)
		w.qualifiedIdent(t.Obj())

	case *types.Pointer:
		w.startType(pointerType)
		w.typ(t.Elem(), pkg)

	case *types.Slice:
		w.startType(sliceType)
		w.typ(t.Elem(), pkg)

	case *types.Array:
		w.startType(arrayType)
		w.uint64(uint64(t.Len()))
		w.typ(t.Elem(), pkg)

	case *types.Chan:
		w.startType(chanType)
		// 1 RecvOnly; 2 SendOnly; 3 SendRecv
		var dir uint64
		switch t.Dir() {
		case types.RecvOnly:
			dir = 1
		case types.SendOnly:
			dir = 2
		case types.SendRecv:
			dir = 3
		}
		w.uint64(dir)
		w.typ(t.Elem(), pkg)

	case *types.Map:
		w.startType(mapType)
		w.typ(t.Key(), pkg)
		w.typ(t.Elem(), pkg)

	case *types.Signature:
		w.startType(signatureType)
		w.setPkg(pkg, true)
		w.signature(t)

	case *types.Struct:
		w.startType(structType)
		w.setPkg(pkg, true)

		n := t.NumFields()
		w.uint64(uint64(n))
		for i := 0; i < n; i++ {
			f := t.Field(i)
			w.pos(f.Pos())
			w.string(f.Name())
			w.typ(f.Type(), pkg)
			w.bool(f.Embedded())
			w.string(t.Tag(i)) // note (or tag)
		}

	case *types.Interface:
		w.startType(interfaceType)
		w.setPkg(pkg, true)

		// Embedded interfaces first, then explicit methods.
		n := t.NumEmbeddeds()
		w.uint64(uint64(n))
		for i := 0; i < n; i++ {
			f := t.Embedded(i)
			w.pos(f.Obj().Pos())
			w.typ(f.Obj().Type(), f.Obj().Pkg())
		}

		n = t.NumExplicitMethods()
		w.uint64(uint64(n))
		for i := 0; i < n; i++ {
			m := t.ExplicitMethod(i)
			w.pos(m.Pos())
			w.string(m.Name())
			sig, _ := m.Type().(*types.Signature)
			w.signature(sig)
		}

	default:
		panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
	}
}
|
||||||
|
|
||||||
|
// setPkg records pkg as the current package for subsequent local
// identifiers, and additionally writes it out when write is true.
func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
	if write {
		w.pkg(pkg)
	}

	w.currPkg = pkg
}
|
||||||
|
|
||||||
|
func (w *exportWriter) signature(sig *types.Signature) {
|
||||||
|
w.paramList(sig.Params())
|
||||||
|
w.paramList(sig.Results())
|
||||||
|
if sig.Params().Len() > 0 {
|
||||||
|
w.bool(sig.Variadic())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) paramList(tup *types.Tuple) {
|
||||||
|
n := tup.Len()
|
||||||
|
w.uint64(uint64(n))
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
w.param(tup.At(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// param writes a single parameter: position, local name, and type.
func (w *exportWriter) param(obj types.Object) {
	w.pos(obj.Pos())
	w.localIdent(obj)
	w.typ(obj.Type(), obj.Pkg())
}
|
||||||
|
|
||||||
|
// value writes typ followed by the constant v, using an encoding
// determined by v's kind.
func (w *exportWriter) value(typ types.Type, v constant.Value) {
	w.typ(typ, nil)

	switch v.Kind() {
	case constant.Bool:
		w.bool(constant.BoolVal(v))
	case constant.Int:
		var i big.Int
		if i64, exact := constant.Int64Val(v); exact {
			i.SetInt64(i64)
		} else if ui64, exact := constant.Uint64Val(v); exact {
			i.SetUint64(ui64)
		} else {
			// Fall back to the exact decimal string representation.
			i.SetString(v.ExactString(), 10)
		}
		w.mpint(&i, typ)
	case constant.Float:
		f := constantToFloat(v)
		w.mpfloat(f, typ)
	case constant.Complex:
		// Real part, then imaginary part.
		w.mpfloat(constantToFloat(constant.Real(v)), typ)
		w.mpfloat(constantToFloat(constant.Imag(v)), typ)
	case constant.String:
		w.string(constant.StringVal(v))
	case constant.Unknown:
		// package contains type errors
	default:
		panic(internalErrorf("unexpected value %v (%T)", v, v))
	}
}
|
||||||
|
|
||||||
|
// constantToFloat converts a constant.Value with kind constant.Float to a
// big.Float.
func constantToFloat(x constant.Value) *big.Float {
	assert(x.Kind() == constant.Float)
	// Use the same floating-point precision (512) as cmd/compile
	// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
	const mpprec = 512
	var f big.Float
	f.SetPrec(mpprec)
	if v, exact := constant.Float64Val(x); exact {
		// float64
		f.SetFloat64(v)
	} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
		// TODO(gri): add big.Rat accessor to constant.Value.
		n := valueToRat(num)
		d := valueToRat(denom)
		f.SetRat(n.Quo(n, d))
	} else {
		// Value too large to represent as a fraction => inaccessible.
		// TODO(gri): add big.Float accessor to constant.Value.
		_, ok := f.SetString(x.ExactString())
		assert(ok)
	}
	return &f
}
|
||||||
|
|
||||||
|
// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
// byte. Larger values are written out as a length-prefixed big-endian
// byte string, where the length prefix is encoded as its complement.
// For example, bytes 0, 1, and 2 directly represent the integer
// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-,
// 2-, and 3-byte big-endian string follow.
//
// Encoding for signed types use the same general approach as for
// unsigned types, except small values use zig-zag encoding and the
// bottom bit of length prefix byte for large values is reserved as a
// sign bit.
//
// The exact boundary between small and large encodings varies
// according to the maximum number of bytes needed to encode a value
// of type typ. As a special case, 8-bit types are always encoded as a
// single byte.
//
// TODO(mdempsky): Is this level of complexity really worthwhile?
func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
	basic, ok := typ.Underlying().(*types.Basic)
	if !ok {
		panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
	}

	signed, maxBytes := intSize(basic)

	negative := x.Sign() < 0
	if !signed && negative {
		panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
	}

	// big.Int.Bytes returns the absolute value, big-endian, with no
	// leading zeros; an empty slice means zero.
	b := x.Bytes()
	if len(b) > 0 && b[0] == 0 {
		panic(internalErrorf("leading zeros"))
	}
	if uint(len(b)) > maxBytes {
		panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
	}

	// maxSmall is the number of prefix-byte values reserved for the
	// small (single-byte) encoding; the rest encode a length.
	maxSmall := 256 - maxBytes
	if signed {
		maxSmall = 256 - 2*maxBytes
	}
	if maxBytes == 1 {
		maxSmall = 256
	}

	// Check if x can use small value encoding.
	if len(b) <= 1 {
		var ux uint
		if len(b) == 1 {
			ux = uint(b[0])
		}
		if signed {
			// Zig-zag: low bit carries the sign.
			ux <<= 1
			if negative {
				ux--
			}
		}
		if ux < maxSmall {
			w.data.WriteByte(byte(ux))
			return
		}
	}

	// Large encoding: write the complemented length (with sign bit for
	// signed types), then the big-endian bytes.
	n := 256 - uint(len(b))
	if signed {
		n = 256 - 2*uint(len(b))
		if negative {
			n |= 1
		}
	}
	if n < maxSmall || n >= 256 {
		panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
	}

	w.data.WriteByte(byte(n))
	w.data.Write(b)
}
|
||||||
|
|
||||||
|
// mpfloat exports a multi-precision floating point number.
//
// The number's value is decomposed into mantissa × 2**exponent, where
// mantissa is an integer. The value is written out as mantissa (as a
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
	if f.IsInf() {
		panic("infinite constant")
	}

	// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
	var mant big.Float
	exp := int64(f.MantExp(&mant))

	// Scale so that mant is an integer.
	prec := mant.MinPrec()
	mant.SetMantExp(&mant, int(prec))
	exp -= int64(prec)

	// The conversion must be exact; anything else indicates a bug above.
	manti, acc := mant.Int(nil)
	if acc != big.Exact {
		panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
	}
	w.mpint(manti, typ)
	if manti.Sign() != 0 {
		w.int64(exp)
	}
}
|
||||||
|
|
||||||
|
func (w *exportWriter) bool(b bool) bool {
|
||||||
|
var x uint64
|
||||||
|
if b {
|
||||||
|
x = 1
|
||||||
|
}
|
||||||
|
w.uint64(x)
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// int64 writes x as a signed varint into the data section.
func (w *exportWriter) int64(x int64) { w.data.int64(x) }

// uint64 writes x as an unsigned varint into the data section.
func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }

// string writes s as an offset reference into the shared string section.
func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
|
||||||
|
|
||||||
|
func (w *exportWriter) localIdent(obj types.Object) {
|
||||||
|
// Anonymous parameters.
|
||||||
|
if obj == nil {
|
||||||
|
w.string("")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
name := obj.Name()
|
||||||
|
if name == "_" {
|
||||||
|
w.string("_")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.string(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// intWriter is a bytes.Buffer augmented with varint encoding helpers.
type intWriter struct {
	bytes.Buffer
}

// int64 appends x to the buffer in signed (zig-zag) varint encoding.
func (w *intWriter) int64(x int64) {
	var tmp [binary.MaxVarintLen64]byte
	w.Write(tmp[:binary.PutVarint(tmp[:], x)])
}

// uint64 appends x to the buffer in unsigned varint encoding.
func (w *intWriter) uint64(x uint64) {
	var tmp [binary.MaxVarintLen64]byte
	w.Write(tmp[:binary.PutUvarint(tmp[:], x)])
}
|
||||||
|
|
||||||
|
// assert panics if cond is false. It guards internal invariants that
// indicate a bug in the exporter rather than bad input.
func assert(cond bool) {
	if cond {
		return
	}
	panic("internal error: assertion failed")
}
|
||||||
|
|
||||||
|
// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.

// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
// a ready-to-use empty queue.
type objQueue struct {
	// ring is a circular buffer: the element at logical index i lives
	// at ring[i%len(ring)].
	ring []types.Object
	// head is the logical index of the next element to pop; tail is the
	// logical index of the next free slot. Both grow monotonically until
	// the ring is resized.
	head, tail int
}

// empty returns true if q contains no Nodes.
func (q *objQueue) empty() bool {
	return q.head == q.tail
}

// pushTail appends n to the tail of the queue.
func (q *objQueue) pushTail(obj types.Object) {
	if len(q.ring) == 0 {
		q.ring = make([]types.Object, 16)
	} else if q.head+len(q.ring) == q.tail {
		// Grow the ring.
		nring := make([]types.Object, len(q.ring)*2)
		// Copy the old elements.
		part := q.ring[q.head%len(q.ring):]
		if q.tail-q.head <= len(part) {
			// All live elements are contiguous within part.
			part = part[:q.tail-q.head]
			copy(nring, part)
		} else {
			// Live elements wrap past the end of the ring; copy both halves.
			pos := copy(nring, part)
			copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
		}
		// Renumber so the queue now starts at logical index 0.
		q.ring, q.head, q.tail = nring, 0, q.tail-q.head
	}

	q.ring[q.tail%len(q.ring)] = obj
	q.tail++
}

// popHead pops a node from the head of the queue. It panics if q is empty.
func (q *objQueue) popHead() types.Object {
	if q.empty() {
		panic("dequeue empty")
	}
	obj := q.ring[q.head%len(q.ring)]
	q.head++
	return obj
}
|
|
@ -0,0 +1,606 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Indexed package import.
|
||||||
|
// See cmd/compile/internal/gc/iexport.go for the export data format.
|
||||||
|
|
||||||
|
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"go/constant"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"io"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// intReader wraps a bytes.Reader together with the import path being
// decoded, so read errors can be reported against that path.
type intReader struct {
	*bytes.Reader
	path string
}

// int64 reads a signed varint, reporting malformed input via errorf
// (which panics; IImportData recovers and converts it into an error).
func (r *intReader) int64() int64 {
	i, err := binary.ReadVarint(r.Reader)
	if err != nil {
		errorf("import %q: read varint error: %v", r.path, err)
	}
	return i
}

// uint64 reads an unsigned varint, reporting malformed input via errorf.
func (r *intReader) uint64() uint64 {
	i, err := binary.ReadUvarint(r.Reader)
	if err != nil {
		errorf("import %q: read varint error: %v", r.path, err)
	}
	return i
}
|
||||||
|
|
||||||
|
// predeclReserved is the number of type-offset values reserved for
// predeclared types; offsets below it index typCache directly (see typAt).
const predeclReserved = 32

// itag tags the kind of a type descriptor in the declaration data.
type itag uint64

const (
	// Types
	definedType itag = iota
	pointerType
	sliceType
	arrayType
	chanType
	mapType
	signatureType
	structType
	interfaceType
)
|
||||||
|
|
||||||
|
// IImportData imports a package from the serialized package data
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
	const currentVersion = 0
	version := -1
	// Decoding errors are raised as panics (see errorf); convert them to
	// returned errors here, distinguishing too-new export data.
	defer func() {
		if e := recover(); e != nil {
			if version > currentVersion {
				err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
			} else {
				err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
			}
		}
	}()

	r := &intReader{bytes.NewReader(data), path}

	version = int(r.uint64())
	switch version {
	case currentVersion:
	default:
		errorf("unknown iexport format version %d", version)
	}

	// Section lengths: strings, then declarations.
	sLen := int64(r.uint64())
	dLen := int64(r.uint64())

	whence, _ := r.Seek(0, io.SeekCurrent)
	stringData := data[whence : whence+sLen]
	declData := data[whence+sLen : whence+sLen+dLen]
	r.Seek(sLen+dLen, io.SeekCurrent)

	p := iimporter{
		ipath: path,

		stringData:  stringData,
		stringCache: make(map[uint64]string),
		pkgCache:    make(map[uint64]*types.Package),

		declData: declData,
		pkgIndex: make(map[*types.Package]map[string]uint64),
		typCache: make(map[uint64]types.Type),

		fake: fakeFileSet{
			fset:  fset,
			files: make(map[string]*token.File),
		},
	}

	// Seed the type cache with the predeclared types (offsets below
	// predeclReserved refer to these).
	for i, pt := range predeclared() {
		p.typCache[uint64(i)] = pt
	}

	// Read the package index: for each package, its path, name, and the
	// name -> declaration-offset table.
	pkgList := make([]*types.Package, r.uint64())
	for i := range pkgList {
		pkgPathOff := r.uint64()
		pkgPath := p.stringAt(pkgPathOff)
		pkgName := p.stringAt(r.uint64())
		_ = r.uint64() // package height; unused by go/types

		if pkgPath == "" {
			pkgPath = path
		}
		pkg := imports[pkgPath]
		if pkg == nil {
			pkg = types.NewPackage(pkgPath, pkgName)
			imports[pkgPath] = pkg
		} else if pkg.Name() != pkgName {
			errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
		}

		p.pkgCache[pkgPathOff] = pkg

		nameIndex := make(map[string]uint64)
		for nSyms := r.uint64(); nSyms > 0; nSyms-- {
			name := p.stringAt(r.uint64())
			nameIndex[name] = r.uint64()
		}

		p.pkgIndex[pkg] = nameIndex
		pkgList[i] = pkg
	}
	var localpkg *types.Package
	for _, pkg := range pkgList {
		if pkg.Path() == path {
			localpkg = pkg
		}
	}

	// Eagerly decode every declaration of the package being imported,
	// in sorted name order for determinism.
	names := make([]string, 0, len(p.pkgIndex[localpkg]))
	for name := range p.pkgIndex[localpkg] {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		p.doDecl(localpkg, name)
	}

	for _, typ := range p.interfaceList {
		typ.Complete()
	}

	// record all referenced packages as imports
	list := append(([]*types.Package)(nil), pkgList[1:]...)
	sort.Sort(byPath(list))
	localpkg.SetImports(list)

	// package was imported completely and without errors
	localpkg.MarkComplete()

	consumed, _ := r.Seek(0, io.SeekCurrent)
	return int(consumed), localpkg, nil
}
|
||||||
|
|
||||||
|
// iimporter holds the state shared across all declaration and type reads
// for a single indexed-import operation.
type iimporter struct {
	ipath string // path of the package being imported

	stringData  []byte // raw string section
	stringCache map[uint64]string
	pkgCache    map[uint64]*types.Package

	declData []byte // raw declaration section
	pkgIndex map[*types.Package]map[string]uint64 // name -> decl offset
	typCache map[uint64]types.Type

	fake          fakeFileSet
	interfaceList []*types.Interface // completed at the end of IImportData
}

// doDecl decodes (at most once) the declaration of pkg.name and inserts
// the resulting object into pkg's scope.
func (p *iimporter) doDecl(pkg *types.Package, name string) {
	// See if we've already imported this declaration.
	if obj := pkg.Scope().Lookup(name); obj != nil {
		return
	}

	off, ok := p.pkgIndex[pkg][name]
	if !ok {
		errorf("%v.%v not in index", pkg, name)
	}

	r := &importReader{p: p, currPkg: pkg}
	r.declReader.Reset(p.declData[off:])

	r.obj(name)
}

// stringAt decodes (and memoizes) the length-prefixed string stored at
// the given offset in the string section.
func (p *iimporter) stringAt(off uint64) string {
	if s, ok := p.stringCache[off]; ok {
		return s
	}

	slen, n := binary.Uvarint(p.stringData[off:])
	if n <= 0 {
		errorf("varint failed")
	}
	spos := off + uint64(n)
	s := string(p.stringData[spos : spos+slen])
	p.stringCache[off] = s
	return s
}

// pkgAt resolves a package reference. Every referenced package must have
// been registered while reading the package index, so a cache miss is a
// format error.
func (p *iimporter) pkgAt(off uint64) *types.Package {
	if pkg, ok := p.pkgCache[off]; ok {
		return pkg
	}
	path := p.stringAt(off)
	errorf("missing package %q in %q", path, p.ipath)
	return nil
}

// typAt resolves a type reference. Offsets below predeclReserved index
// the predeclared types; anything else is a declaration-section offset.
// Interface types read in the context of a defined type (base != nil)
// bypass the cache, since their method receivers depend on base.
func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
	if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
		return t
	}

	if off < predeclReserved {
		errorf("predeclared type missing from cache: %v", off)
	}

	r := &importReader{p: p}
	r.declReader.Reset(p.declData[off-predeclReserved:])
	t := r.doType(base)

	if base == nil || !isInterface(t) {
		p.typCache[off] = t
	}
	return t
}
|
||||||
|
|
||||||
|
// importReader decodes one declaration (or type) from a slice of the
// bundle's declaration data.
type importReader struct {
	p          *iimporter
	declReader bytes.Reader
	currPkg    *types.Package
	// prevFile/prevLine carry the state for delta-decoded positions (see pos).
	prevFile string
	prevLine int64
}

// obj decodes the declaration for name at the reader's current offset and
// declares it in currPkg. The single-byte tag selects the object kind:
// 'A' alias, 'C' const, 'F' func, 'T' type, 'V' var.
func (r *importReader) obj(name string) {
	tag := r.byte()
	pos := r.pos()

	switch tag {
	case 'A':
		typ := r.typ()

		r.declare(types.NewTypeName(pos, r.currPkg, name, typ))

	case 'C':
		typ, val := r.value()

		r.declare(types.NewConst(pos, r.currPkg, name, typ, val))

	case 'F':
		sig := r.signature(nil)

		r.declare(types.NewFunc(pos, r.currPkg, name, sig))

	case 'T':
		// Types can be recursive. We need to setup a stub
		// declaration before recursing.
		obj := types.NewTypeName(pos, r.currPkg, name, nil)
		named := types.NewNamed(obj, nil, nil)
		r.declare(obj)

		underlying := r.p.typAt(r.uint64(), named).Underlying()
		named.SetUnderlying(underlying)

		// Interface types carry their methods inline; other defined
		// types are followed by an explicit method list.
		if !isInterface(underlying) {
			for n := r.uint64(); n > 0; n-- {
				mpos := r.pos()
				mname := r.ident()
				recv := r.param()
				msig := r.signature(recv)

				named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
			}
		}

	case 'V':
		typ := r.typ()

		r.declare(types.NewVar(pos, r.currPkg, name, typ))

	default:
		errorf("unexpected tag: %v", tag)
	}
}
|
||||||
|
|
||||||
|
// declare inserts obj into its own package's scope.
func (r *importReader) declare(obj types.Object) {
	obj.Pkg().Scope().Insert(obj)
}

// value reads a constant: its type, then a payload whose encoding is
// selected by the type's constant-kind bits (mirroring exportWriter.value).
func (r *importReader) value() (typ types.Type, val constant.Value) {
	typ = r.typ()

	switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
	case types.IsBoolean:
		val = constant.MakeBool(r.bool())

	case types.IsString:
		val = constant.MakeString(r.string())

	case types.IsInteger:
		val = r.mpint(b)

	case types.IsFloat:
		val = r.mpfloat(b)

	case types.IsComplex:
		// Real part first, then imaginary part.
		re := r.mpfloat(b)
		im := r.mpfloat(b)
		val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))

	default:
		if b.Kind() == types.Invalid {
			// Package contained type errors; represent the value as unknown.
			val = constant.MakeUnknown()
			return
		}
		errorf("unexpected type %v", typ) // panics
		panic("unreachable")
	}

	return
}
|
||||||
|
|
||||||
|
func intSize(b *types.Basic) (signed bool, maxBytes uint) {
|
||||||
|
if (b.Info() & types.IsUntyped) != 0 {
|
||||||
|
return true, 64
|
||||||
|
}
|
||||||
|
|
||||||
|
switch b.Kind() {
|
||||||
|
case types.Float32, types.Complex64:
|
||||||
|
return true, 3
|
||||||
|
case types.Float64, types.Complex128:
|
||||||
|
return true, 7
|
||||||
|
}
|
||||||
|
|
||||||
|
signed = (b.Info() & types.IsUnsigned) == 0
|
||||||
|
switch b.Kind() {
|
||||||
|
case types.Int8, types.Uint8:
|
||||||
|
maxBytes = 1
|
||||||
|
case types.Int16, types.Uint16:
|
||||||
|
maxBytes = 2
|
||||||
|
case types.Int32, types.Uint32:
|
||||||
|
maxBytes = 4
|
||||||
|
default:
|
||||||
|
maxBytes = 8
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// mpint reads a multi-precision integer encoded by exportWriter.mpint:
// a single byte below maxSmall is the value itself (zig-zag for signed
// types); otherwise the byte is the complemented length of a big-endian
// byte string that follows (with the low bit as a sign flag for signed
// types).
func (r *importReader) mpint(b *types.Basic) constant.Value {
	signed, maxBytes := intSize(b)

	// maxSmall must match the writer's computation of the small/large
	// encoding boundary.
	maxSmall := 256 - maxBytes
	if signed {
		maxSmall = 256 - 2*maxBytes
	}
	if maxBytes == 1 {
		maxSmall = 256
	}

	n, _ := r.declReader.ReadByte()
	if uint(n) < maxSmall {
		// Small encoding: the byte is the value (zig-zag for signed).
		v := int64(n)
		if signed {
			v >>= 1
			if n&1 != 0 {
				v = ^v
			}
		}
		return constant.MakeInt64(v)
	}

	// Large encoding: recover the byte-string length from the complemented
	// prefix. n is a byte, so -n wraps modulo 256 (i.e. 256-n); for signed
	// types the low bit is the sign and is masked off before halving.
	v := -n
	if signed {
		v = -(n &^ 1) >> 1
	}
	if v < 1 || uint(v) > maxBytes {
		errorf("weird decoding: %v, %v => %v", n, signed, v)
	}

	buf := make([]byte, v)
	io.ReadFull(&r.declReader, buf)

	// convert to little endian
	// TODO(gri) go/constant should have a more direct conversion function
	// (e.g., once it supports a big.Float based implementation)
	for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
		buf[i], buf[j] = buf[j], buf[i]
	}

	x := constant.MakeFromBytes(buf)
	if signed && n&1 != 0 {
		x = constant.UnaryOp(token.SUB, x, 0)
	}
	return x
}
|
||||||
|
|
||||||
|
// mpfloat reads a float encoded as mantissa × 2**exponent (see
// exportWriter.mpfloat). The exponent is present only when the mantissa
// is nonzero.
func (r *importReader) mpfloat(b *types.Basic) constant.Value {
	x := r.mpint(b)
	if constant.Sign(x) == 0 {
		return x
	}

	exp := r.int64()
	switch {
	case exp > 0:
		x = constant.Shift(x, token.SHL, uint(exp))
	case exp < 0:
		// Negative exponent: divide by 2**-exp as an exact rational.
		d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
		x = constant.BinaryOp(x, token.QUO, d)
	}
	return x
}
|
||||||
|
|
||||||
|
// ident reads an identifier; identifiers are stored as plain strings.
func (r *importReader) ident() string {
	return r.string()
}

// qualifiedIdent reads a name followed by its package reference
// (in that order — the writer emits name first).
func (r *importReader) qualifiedIdent() (*types.Package, string) {
	name := r.string()
	pkg := r.pkg()
	return pkg, name
}

// pos decodes a delta-encoded source position. A first delta equal to
// deltaNewFile (declared elsewhere in this package) is an escape: the next
// value is either -1 (same file, the line delta really was deltaNewFile)
// or the absolute line of a new file whose name follows.
func (r *importReader) pos() token.Pos {
	delta := r.int64()
	if delta != deltaNewFile {
		// Common case: same file, small line delta.
		r.prevLine += delta
	} else if l := r.int64(); l == -1 {
		r.prevLine += deltaNewFile
	} else {
		// New file: read its name and absolute line.
		r.prevFile = r.string()
		r.prevLine = l
	}

	if r.prevFile == "" && r.prevLine == 0 {
		return token.NoPos
	}

	return r.p.fake.pos(r.prevFile, int(r.prevLine))
}

// typ reads a type reference outside the context of any defined type.
func (r *importReader) typ() types.Type {
	return r.p.typAt(r.uint64(), nil)
}
|
||||||
|
|
||||||
|
func isInterface(t types.Type) bool {
|
||||||
|
_, ok := t.(*types.Interface)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// pkg reads a package reference (a string-section offset).
func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }

// string reads a string reference (a string-section offset).
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
|
||||||
|
|
||||||
|
// doType decodes one type descriptor, dispatching on its itag. base, when
// non-nil, is the defined type whose underlying type is being read; it is
// used as the receiver type for interface methods.
func (r *importReader) doType(base *types.Named) types.Type {
	switch k := r.kind(); k {
	default:
		errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
		return nil

	case definedType:
		pkg, name := r.qualifiedIdent()
		// Ensure the target declaration is imported before looking it up.
		r.p.doDecl(pkg, name)
		return pkg.Scope().Lookup(name).(*types.TypeName).Type()
	case pointerType:
		return types.NewPointer(r.typ())
	case sliceType:
		return types.NewSlice(r.typ())
	case arrayType:
		n := r.uint64()
		return types.NewArray(r.typ(), int64(n))
	case chanType:
		dir := chanDir(int(r.uint64()))
		return types.NewChan(dir, r.typ())
	case mapType:
		return types.NewMap(r.typ(), r.typ())
	case signatureType:
		r.currPkg = r.pkg()
		return r.signature(nil)

	case structType:
		r.currPkg = r.pkg()

		fields := make([]*types.Var, r.uint64())
		tags := make([]string, len(fields))
		for i := range fields {
			// Per field: position, name, type, embedded flag, tag.
			fpos := r.pos()
			fname := r.ident()
			ftyp := r.typ()
			emb := r.bool()
			tag := r.string()

			fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
			tags[i] = tag
		}
		return types.NewStruct(fields, tags)

	case interfaceType:
		r.currPkg = r.pkg()

		embeddeds := make([]types.Type, r.uint64())
		for i := range embeddeds {
			_ = r.pos() // embedded-type position; unused by go/types
			embeddeds[i] = r.typ()
		}

		methods := make([]*types.Func, r.uint64())
		for i := range methods {
			mpos := r.pos()
			mname := r.ident()

			// TODO(mdempsky): Matches bimport.go, but I
			// don't agree with this.
			var recv *types.Var
			if base != nil {
				recv = types.NewVar(token.NoPos, r.currPkg, "", base)
			}

			msig := r.signature(recv)
			methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
		}

		// Record the interface so IImportData can Complete it at the end.
		typ := newInterface(methods, embeddeds)
		r.p.interfaceList = append(r.p.interfaceList, typ)
		return typ
	}
}
|
||||||
|
|
||||||
|
// kind reads a type descriptor tag.
func (r *importReader) kind() itag {
	return itag(r.uint64())
}

// signature reads a function signature: parameter list, result list, and —
// only when there is at least one parameter — a variadic flag. The
// short-circuit in the && means no byte is consumed for the flag when the
// parameter list is empty (presumably mirroring the writer; confirm there).
func (r *importReader) signature(recv *types.Var) *types.Signature {
	params := r.paramList()
	results := r.paramList()
	variadic := params.Len() > 0 && r.bool()
	return types.NewSignature(recv, params, results, variadic)
}
|
||||||
|
|
||||||
|
func (r *importReader) paramList() *types.Tuple {
|
||||||
|
xs := make([]*types.Var, r.uint64())
|
||||||
|
for i := range xs {
|
||||||
|
xs[i] = r.param()
|
||||||
|
}
|
||||||
|
return types.NewTuple(xs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// param reads one parameter: position, name, type (in that order).
func (r *importReader) param() *types.Var {
	pos := r.pos()
	name := r.ident()
	typ := r.typ()
	return types.NewParam(pos, r.currPkg, name, typ)
}

// bool decodes a uint64 as a boolean; any nonzero value means true.
func (r *importReader) bool() bool {
	return r.uint64() != 0
}

// int64 reads a signed varint from the declaration data.
func (r *importReader) int64() int64 {
	n, err := binary.ReadVarint(&r.declReader)
	if err != nil {
		errorf("readVarint: %v", err)
	}
	return n
}

// uint64 reads an unsigned varint from the declaration data.
func (r *importReader) uint64() uint64 {
	n, err := binary.ReadUvarint(&r.declReader)
	if err != nil {
		errorf("readUvarint: %v", err)
	}
	return n
}

// byte reads a single raw byte from the declaration data.
func (r *importReader) byte() byte {
	x, err := r.declReader.ReadByte()
	if err != nil {
		errorf("declReader.ReadByte: %v", err)
	}
	return x
}
|
21
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
generated
vendored
Normal file
21
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.11
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import "go/types"
|
||||||
|
|
||||||
|
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
|
||||||
|
named := make([]*types.Named, len(embeddeds))
|
||||||
|
for i, e := range embeddeds {
|
||||||
|
var ok bool
|
||||||
|
named[i], ok = e.(*types.Named)
|
||||||
|
if !ok {
|
||||||
|
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return types.NewInterface(methods, named)
|
||||||
|
}
|
13
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
generated
vendored
Normal file
13
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.11
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import "go/types"
|
||||||
|
|
||||||
|
// newInterface builds a *types.Interface from methods and embedded types.
// On Go 1.11+ types.NewInterfaceType accepts arbitrary embedded types
// directly.
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
	return types.NewInterfaceType(methods, embeddeds)
}
|
|
@ -0,0 +1,160 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package packagesdriver fetches type sizes for go/packages and go/analysis.
|
||||||
|
package packagesdriver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"go/types"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// debug, when true, enables the timing log in InvokeGo.
var debug = false
|
||||||
|
|
||||||
|
// GetSizes returns the sizes used by the underlying driver with the given parameters.
// It first looks for an external driver (GOPACKAGESDRIVER env var, then a
// "gopackagesdriver" binary on PATH) and falls back to "go list" when the
// driver is absent or explicitly set to "off".
func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
	// TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver.
	const toolPrefix = "GOPACKAGESDRIVER="
	tool := ""
	// The last matching env entry wins.
	for _, env := range env {
		if val := strings.TrimPrefix(env, toolPrefix); val != env {
			tool = val
		}
	}

	if tool == "" {
		var err error
		tool, err = exec.LookPath("gopackagesdriver")
		if err != nil {
			// We did not find the driver, so use "go list".
			tool = "off"
		}
	}

	if tool == "off" {
		return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData)
	}

	// Driver protocol: send a JSON request on stdin, read a JSON response
	// containing the sizes from stdout.
	req, err := json.Marshal(struct {
		Command    string   `json:"command"`
		Env        []string `json:"env"`
		BuildFlags []string `json:"build_flags"`
	}{
		Command:    "sizes",
		Env:        env,
		BuildFlags: buildFlags,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
	}

	buf := new(bytes.Buffer)
	cmd := exec.CommandContext(ctx, tool)
	cmd.Dir = dir
	cmd.Env = env
	cmd.Stdin = bytes.NewReader(req)
	cmd.Stdout = buf
	cmd.Stderr = new(bytes.Buffer)
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
	}
	var response struct {
		// Sizes, if not nil, is the types.Sizes to use when type checking.
		Sizes *types.StdSizes
	}
	if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
		return nil, err
	}
	return response.Sizes, nil
}
|
||||||
|
|
||||||
|
// GetSizesGolist determines types.Sizes by asking the go command for the
// effective GOARCH and compiler, then mapping them via types.SizesFor.
// NOTE(review): types.SizesFor returns nil for an unknown compiler/arch
// pair, in which case this returns (nil, nil) — callers should tolerate a
// nil Sizes.
func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
	args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
	args = append(args, buildFlags...)
	// "unsafe" is used only as a cheap, always-present package to query.
	args = append(args, "--", "unsafe")
	stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...)
	if err != nil {
		return nil, err
	}
	fields := strings.Fields(stdout.String())
	if len(fields) < 2 {
		return nil, fmt.Errorf("could not determine GOARCH and Go compiler")
	}
	goarch := fields[0]
	compiler := fields[1]
	return types.SizesFor(compiler, goarch), nil
}
|
||||||
|
|
||||||
|
// InvokeGo returns the stdout of a go command invocation.
|
||||||
|
func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) {
|
||||||
|
if debug {
|
||||||
|
defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
|
||||||
|
}
|
||||||
|
stdout := new(bytes.Buffer)
|
||||||
|
stderr := new(bytes.Buffer)
|
||||||
|
cmd := exec.CommandContext(ctx, "go", args...)
|
||||||
|
// On darwin the cwd gets resolved to the real path, which breaks anything that
|
||||||
|
// expects the working directory to keep the original path, including the
|
||||||
|
// go command when dealing with modules.
|
||||||
|
// The Go stdlib has a special feature where if the cwd and the PWD are the
|
||||||
|
// same node then it trusts the PWD, so by setting it in the env for the child
|
||||||
|
// process we fix up all the paths returned by the go command.
|
||||||
|
cmd.Env = append(append([]string{}, env...), "PWD="+dir)
|
||||||
|
cmd.Dir = dir
|
||||||
|
cmd.Stdout = stdout
|
||||||
|
cmd.Stderr = stderr
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
exitErr, ok := err.(*exec.ExitError)
|
||||||
|
if !ok {
|
||||||
|
// Catastrophic error:
|
||||||
|
// - executable not found
|
||||||
|
// - context cancellation
|
||||||
|
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Export mode entails a build.
|
||||||
|
// If that build fails, errors appear on stderr
|
||||||
|
// (despite the -e flag) and the Export field is blank.
|
||||||
|
// Do not fail in that case.
|
||||||
|
if !usesExportData {
|
||||||
|
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// As of writing, go list -export prints some non-fatal compilation
|
||||||
|
// errors to stderr, even with -e set. We would prefer that it put
|
||||||
|
// them in the Package.Error JSON (see https://golang.org/issue/26319).
|
||||||
|
// In the meantime, there's nowhere good to put them, but they can
|
||||||
|
// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
|
||||||
|
// is set.
|
||||||
|
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
|
||||||
|
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// debugging
|
||||||
|
if false {
|
||||||
|
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
|
||||||
|
}
|
||||||
|
|
||||||
|
return stdout, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cmdDebugStr renders a go command invocation for debug logging: the
// environment variables of interest followed by the argument list.
func cmdDebugStr(envlist []string, args ...string) string {
	env := make(map[string]string)
	for _, kv := range envlist {
		// SplitN keeps any '=' inside the value intact (e.g.
		// GOFLAGS=-ldflags=-w), and entries without '=' are skipped
		// instead of panicking on a missing second element.
		split := strings.SplitN(kv, "=", 2)
		if len(split) < 2 {
			continue
		}
		env[split[0]] = split[1]
	}

	return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
}
|
|
@ -0,0 +1,222 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package packages loads Go packages for inspection and analysis.
|
||||||
|
|
||||||
|
The Load function takes as input a list of patterns and returns a list of Package
|
||||||
|
structs describing individual packages matched by those patterns.
|
||||||
|
The LoadMode controls the amount of detail in the loaded packages.
|
||||||
|
|
||||||
|
Load passes most patterns directly to the underlying build tool,
|
||||||
|
but all patterns with the prefix "query=", where query is a
|
||||||
|
non-empty string of letters from [a-z], are reserved and may be
|
||||||
|
interpreted as query operators.
|
||||||
|
|
||||||
|
Two query operators are currently supported: "file" and "pattern".
|
||||||
|
|
||||||
|
The query "file=path/to/file.go" matches the package or packages enclosing
|
||||||
|
the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
|
||||||
|
might return the packages "fmt" and "fmt [fmt.test]".
|
||||||
|
|
||||||
|
The query "pattern=string" causes "string" to be passed directly to
|
||||||
|
the underlying build tool. In most cases this is unnecessary,
|
||||||
|
but an application can use Load("pattern=" + x) as an escaping mechanism
|
||||||
|
to ensure that x is not interpreted as a query operator if it contains '='.
|
||||||
|
|
||||||
|
All other query operators are reserved for future use and currently
|
||||||
|
cause Load to report an error.
|
||||||
|
|
||||||
|
The Package struct provides basic information about the package, including
|
||||||
|
|
||||||
|
- ID, a unique identifier for the package in the returned set;
|
||||||
|
- GoFiles, the names of the package's Go source files;
|
||||||
|
- Imports, a map from source import strings to the Packages they name;
|
||||||
|
- Types, the type information for the package's exported symbols;
|
||||||
|
- Syntax, the parsed syntax trees for the package's source code; and
|
||||||
|
- TypeInfo, the result of a complete type-check of the package syntax trees.
|
||||||
|
|
||||||
|
(See the documentation for type Package for the complete list of fields
|
||||||
|
and more detailed descriptions.)
|
||||||
|
|
||||||
|
For example,
|
||||||
|
|
||||||
|
Load(nil, "bytes", "unicode...")
|
||||||
|
|
||||||
|
returns four Package structs describing the standard library packages
|
||||||
|
bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
|
||||||
|
can match multiple packages and that a package might be matched by
|
||||||
|
multiple patterns: in general it is not possible to determine which
|
||||||
|
packages correspond to which patterns.
|
||||||
|
|
||||||
|
Note that the list returned by Load contains only the packages matched
|
||||||
|
by the patterns. Their dependencies can be found by walking the import
|
||||||
|
graph using the Imports fields.
|
||||||
|
|
||||||
|
The Load function can be configured by passing a pointer to a Config as
|
||||||
|
the first argument. A nil Config is equivalent to the zero Config, which
|
||||||
|
causes Load to run in LoadFiles mode, collecting minimal information.
|
||||||
|
See the documentation for type Config for details.
|
||||||
|
|
||||||
|
As noted earlier, the Config.Mode controls the amount of detail
|
||||||
|
reported about the loaded packages, with each mode returning all the data of the
|
||||||
|
previous mode with some extra added. See the documentation for type LoadMode
|
||||||
|
for details.
|
||||||
|
|
||||||
|
Most tools should pass their command-line arguments (after any flags)
|
||||||
|
uninterpreted to the loader, so that the loader can interpret them
|
||||||
|
according to the conventions of the underlying build system.
|
||||||
|
See the Example function for typical usage.
|
||||||
|
|
||||||
|
*/
|
||||||
|
package packages // import "golang.org/x/tools/go/packages"
|
||||||
|
|
||||||
|
/*
|
||||||
|
|
||||||
|
Motivation and design considerations
|
||||||
|
|
||||||
|
The new package's design solves problems addressed by two existing
|
||||||
|
packages: go/build, which locates and describes packages, and
|
||||||
|
golang.org/x/tools/go/loader, which loads, parses and type-checks them.
|
||||||
|
The go/build.Package structure encodes too much of the 'go build' way
|
||||||
|
of organizing projects, leaving us in need of a data type that describes a
|
||||||
|
package of Go source code independent of the underlying build system.
|
||||||
|
We wanted something that works equally well with go build and vgo, and
|
||||||
|
also other build systems such as Bazel and Blaze, making it possible to
|
||||||
|
construct analysis tools that work in all these environments.
|
||||||
|
Tools such as errcheck and staticcheck were essentially unavailable to
|
||||||
|
the Go community at Google, and some of Google's internal tools for Go
|
||||||
|
are unavailable externally.
|
||||||
|
This new package provides a uniform way to obtain package metadata by
|
||||||
|
querying each of these build systems, optionally supporting their
|
||||||
|
preferred command-line notations for packages, so that tools integrate
|
||||||
|
neatly with users' build environments. The Metadata query function
|
||||||
|
executes an external query tool appropriate to the current workspace.
|
||||||
|
|
||||||
|
Loading packages always returns the complete import graph "all the way down",
|
||||||
|
even if all you want is information about a single package, because the query
|
||||||
|
mechanisms of all the build systems we currently support ({go,vgo} list, and
|
||||||
|
blaze/bazel aspect-based query) cannot provide detailed information
|
||||||
|
about one package without visiting all its dependencies too, so there is
|
||||||
|
no additional asymptotic cost to providing transitive information.
|
||||||
|
(This property might not be true of a hypothetical 5th build system.)
|
||||||
|
|
||||||
|
In calls to TypeCheck, all initial packages, and any package that
|
||||||
|
transitively depends on one of them, must be loaded from source.
|
||||||
|
Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
|
||||||
|
source; D may be loaded from export data, and E may not be loaded at all
|
||||||
|
(though it's possible that D's export data mentions it, so a
|
||||||
|
types.Package may be created for it and exposed.)
|
||||||
|
|
||||||
|
The old loader had a feature to suppress type-checking of function
|
||||||
|
bodies on a per-package basis, primarily intended to reduce the work of
|
||||||
|
obtaining type information for imported packages. Now that imports are
|
||||||
|
satisfied by export data, the optimization no longer seems necessary.
|
||||||
|
|
||||||
|
Despite some early attempts, the old loader did not exploit export data,
|
||||||
|
instead always using the equivalent of WholeProgram mode. This was due
|
||||||
|
to the complexity of mixing source and export data packages (now
|
||||||
|
resolved by the upward traversal mentioned above), and because export data
|
||||||
|
files were nearly always missing or stale. Now that 'go build' supports
|
||||||
|
caching, all the underlying build systems can guarantee to produce
|
||||||
|
export data in a reasonable (amortized) time.
|
||||||
|
|
||||||
|
Test "main" packages synthesized by the build system are now reported as
|
||||||
|
first-class packages, avoiding the need for clients (such as go/ssa) to
|
||||||
|
reinvent this generation logic.
|
||||||
|
|
||||||
|
One way in which go/packages is simpler than the old loader is in its
|
||||||
|
treatment of in-package tests. In-package tests are packages that
|
||||||
|
consist of all the files of the library under test, plus the test files.
|
||||||
|
The old loader constructed in-package tests by a two-phase process of
|
||||||
|
mutation called "augmentation": first it would construct and type check
|
||||||
|
all the ordinary library packages and type-check the packages that
|
||||||
|
depend on them; then it would add more (test) files to the package and
|
||||||
|
type-check again. This two-phase approach had four major problems:
|
||||||
|
1) in processing the tests, the loader modified the library package,
|
||||||
|
leaving no way for a client application to see both the test
|
||||||
|
package and the library package; one would mutate into the other.
|
||||||
|
2) because test files can declare additional methods on types defined in
|
||||||
|
the library portion of the package, the dispatch of method calls in
|
||||||
|
the library portion was affected by the presence of the test files.
|
||||||
|
This should have been a clue that the packages were logically
|
||||||
|
different.
|
||||||
|
3) this model of "augmentation" assumed at most one in-package test
|
||||||
|
per library package, which is true of projects using 'go build',
|
||||||
|
but not other build systems.
|
||||||
|
4) because of the two-phase nature of test processing, all packages that
|
||||||
|
import the library package had to be processed before augmentation,
|
||||||
|
forcing a "one-shot" API and preventing the client from calling Load
|
||||||
|
several times in sequence as is now possible in WholeProgram mode.
|
||||||
|
(TypeCheck mode has a similar one-shot restriction for a different reason.)
|
||||||
|
|
||||||
|
Early drafts of this package supported "multi-shot" operation.
|
||||||
|
Although it allowed clients to make a sequence of calls (or concurrent
|
||||||
|
calls) to Load, building up the graph of Packages incrementally,
|
||||||
|
it was of marginal value: it complicated the API
|
||||||
|
(since it allowed some options to vary across calls but not others),
|
||||||
|
it complicated the implementation,
|
||||||
|
it cannot be made to work in Types mode, as explained above,
|
||||||
|
and it was less efficient than making one combined call (when this is possible).
|
||||||
|
Among the clients we have inspected, none made multiple calls to load
|
||||||
|
but could not be easily and satisfactorily modified to make only a single call.
|
||||||
|
However, application changes may be required.
|
||||||
|
For example, the ssadump command loads the user-specified packages
|
||||||
|
and in addition the runtime package. It is tempting to simply append
|
||||||
|
"runtime" to the user-provided list, but that does not work if the user
|
||||||
|
specified an ad-hoc package such as [a.go b.go].
|
||||||
|
Instead, ssadump no longer requests the runtime package,
|
||||||
|
but seeks it among the dependencies of the user-specified packages,
|
||||||
|
and emits an error if it is not found.
|
||||||
|
|
||||||
|
Overlays: The Overlay field in the Config allows providing alternate contents
|
||||||
|
for Go source files, by providing a mapping from file path to contents.
|
||||||
|
go/packages will pull in new imports added in overlay files when go/packages
|
||||||
|
is run in LoadImports mode or greater.
|
||||||
|
Overlay support for the go list driver isn't complete yet: if the file doesn't
|
||||||
|
exist on disk, it will only be recognized in an overlay if it is a non-test file
|
||||||
|
and the package would be reported even without the overlay.
|
||||||
|
|
||||||
|
Questions & Tasks
|
||||||
|
|
||||||
|
- Add GOARCH/GOOS?
|
||||||
|
They are not portable concepts, but could be made portable.
|
||||||
|
Our goal has been to allow users to express themselves using the conventions
|
||||||
|
of the underlying build system: if the build system honors GOARCH
|
||||||
|
during a build and during a metadata query, then so should
|
||||||
|
applications built atop that query mechanism.
|
||||||
|
Conversely, if the target architecture of the build is determined by
|
||||||
|
command-line flags, the application can pass the relevant
|
||||||
|
flags through to the build system using a command such as:
|
||||||
|
myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
|
||||||
|
However, this approach is low-level, unwieldy, and non-portable.
|
||||||
|
GOOS and GOARCH seem important enough to warrant a dedicated option.
|
||||||
|
|
||||||
|
- How should we handle partial failures such as a mixture of good and
|
||||||
|
malformed patterns, existing and non-existent packages, successful and
|
||||||
|
failed builds, import failures, import cycles, and so on, in a call to
|
||||||
|
Load?
|
||||||
|
|
||||||
|
- Support bazel, blaze, and go1.10 list, not just go1.11 list.
|
||||||
|
|
||||||
|
- Handle (and test) various partial success cases, e.g.
|
||||||
|
a mixture of good packages and:
|
||||||
|
invalid patterns
|
||||||
|
nonexistent packages
|
||||||
|
empty packages
|
||||||
|
packages with malformed package or import declarations
|
||||||
|
unreadable files
|
||||||
|
import cycles
|
||||||
|
other parse errors
|
||||||
|
type errors
|
||||||
|
Make sure we record errors at the correct place in the graph.
|
||||||
|
|
||||||
|
- Missing packages among initial arguments are not reported.
|
||||||
|
Return bogus packages for them, like golist does.
|
||||||
|
|
||||||
|
- "undeclared name" errors (for example) are reported out of source file
|
||||||
|
order. I suspect this is due to the breadth-first resolution now used
|
||||||
|
by go/types. Is that a bug? Discuss with gri.
|
||||||
|
|
||||||
|
*/
|
|
@ -0,0 +1,79 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// This file enables an external tool to intercept package requests.
|
||||||
|
// If the tool is present then its results are used in preference to
|
||||||
|
// the go list command.
|
||||||
|
|
||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Driver

// driverRequest is the JSON message written to an external driver
// tool's standard input. Its fields are populated from the Config
// (see findExternalDriver below).
type driverRequest struct {
	Command string `json:"command"`
	// Mode is the LoadMode requested by the caller.
	Mode LoadMode `json:"mode"`
	// Env is the environment the driver should use for build-tool invocations.
	Env []string `json:"env"`
	// BuildFlags are extra flags for the underlying build system.
	BuildFlags []string `json:"build_flags"`
	// Tests indicates whether test packages were requested.
	Tests bool `json:"tests"`
	// Overlay maps file paths to alternate file contents.
	Overlay map[string][]byte `json:"overlay"`
}
|
||||||
|
|
||||||
|
// findExternalDriver returns the file path of a tool that supplies
|
||||||
|
// the build system package structure, or "" if not found."
|
||||||
|
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
|
||||||
|
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
|
||||||
|
func findExternalDriver(cfg *Config) driver {
|
||||||
|
const toolPrefix = "GOPACKAGESDRIVER="
|
||||||
|
tool := ""
|
||||||
|
for _, env := range cfg.Env {
|
||||||
|
if val := strings.TrimPrefix(env, toolPrefix); val != env {
|
||||||
|
tool = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if tool != "" && tool == "off" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if tool == "" {
|
||||||
|
var err error
|
||||||
|
tool, err = exec.LookPath("gopackagesdriver")
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return func(cfg *Config, words ...string) (*driverResponse, error) {
|
||||||
|
req, err := json.Marshal(driverRequest{
|
||||||
|
Mode: cfg.Mode,
|
||||||
|
Env: cfg.Env,
|
||||||
|
BuildFlags: cfg.BuildFlags,
|
||||||
|
Tests: cfg.Tests,
|
||||||
|
Overlay: cfg.Overlay,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
cmd := exec.CommandContext(cfg.Context, tool, words...)
|
||||||
|
cmd.Dir = cfg.Dir
|
||||||
|
cmd.Env = cfg.Env
|
||||||
|
cmd.Stdin = bytes.NewReader(req)
|
||||||
|
cmd.Stdout = buf
|
||||||
|
cmd.Stderr = new(bytes.Buffer)
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
|
||||||
|
}
|
||||||
|
var response driverResponse
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &response, nil
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,870 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"go/types"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/internal/packagesdriver"
|
||||||
|
"golang.org/x/tools/internal/gopathwalk"
|
||||||
|
"golang.org/x/tools/internal/semver"
|
||||||
|
)
|
||||||
|
|
||||||
|
// debug controls verbose logging; it is enabled by setting the
// GOPACKAGESDEBUG environment variable to a value strconv.ParseBool
// accepts as true (e.g. "1", "true").
var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))

// A goTooOldError reports that the go command
// found by exec.LookPath is too old to use the new go list behavior.
type goTooOldError struct {
	error
}
|
||||||
|
|
||||||
|
// responseDeduper wraps a driverResponse, deduplicating its contents.
type responseDeduper struct {
	// seenRoots records the root IDs already present in dr.Roots.
	seenRoots map[string]bool
	// seenPackages maps package IDs to packages already in dr.Packages.
	seenPackages map[string]*Package
	// dr is the accumulated, deduplicated response.
	dr *driverResponse
}
|
||||||
|
|
||||||
|
// init fills in r with a driverResponse.
|
||||||
|
func (r *responseDeduper) init(dr *driverResponse) {
|
||||||
|
r.dr = dr
|
||||||
|
r.seenRoots = map[string]bool{}
|
||||||
|
r.seenPackages = map[string]*Package{}
|
||||||
|
for _, pkg := range dr.Packages {
|
||||||
|
r.seenPackages[pkg.ID] = pkg
|
||||||
|
}
|
||||||
|
for _, root := range dr.Roots {
|
||||||
|
r.seenRoots[root] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseDeduper) addPackage(p *Package) {
|
||||||
|
if r.seenPackages[p.ID] != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.seenPackages[p.ID] = p
|
||||||
|
r.dr.Packages = append(r.dr.Packages, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseDeduper) addRoot(id string) {
|
||||||
|
if r.seenRoots[id] {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.seenRoots[id] = true
|
||||||
|
r.dr.Roots = append(r.dr.Roots, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// goListDriver uses the go list command to interpret the patterns and produce
// the build system package structure.
// See driver for more details.
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
	// Start the sizes computation concurrently with the main go list
	// call; sizeswg.Wait() below orders the read of sizes/sizeserr
	// after the goroutine's writes.
	var sizes types.Sizes
	var sizeserr error
	var sizeswg sync.WaitGroup
	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
		sizeswg.Add(1)
		go func() {
			sizes, sizeserr = getSizes(cfg)
			sizeswg.Done()
		}()
	}

	// Determine files requested in contains patterns
	var containFiles []string
	var packagesNamed []string
	restPatterns := make([]string, 0, len(patterns))
	// Extract file= and other [querytype]= patterns. Report an error if querytype
	// doesn't exist.
extractQueries:
	for _, pattern := range patterns {
		eqidx := strings.Index(pattern, "=")
		if eqidx < 0 {
			restPatterns = append(restPatterns, pattern)
		} else {
			query, value := pattern[:eqidx], pattern[eqidx+len("="):]
			switch query {
			case "file":
				containFiles = append(containFiles, value)
			case "pattern":
				restPatterns = append(restPatterns, value)
			case "iamashamedtousethedisabledqueryname":
				packagesNamed = append(packagesNamed, value)
			case "": // not a reserved query
				restPatterns = append(restPatterns, pattern)
			default:
				// Only all-lowercase [a-z]+ prefixes are reserved query
				// operators; anything else passes through unchanged.
				for _, rune := range query {
					if rune < 'a' || rune > 'z' { // not a reserved query
						restPatterns = append(restPatterns, pattern)
						continue extractQueries
					}
				}
				// Reject all other patterns containing "="
				return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
			}
		}
	}

	response := &responseDeduper{}
	var err error

	// See if we have any patterns to pass through to go list. Zero initial
	// patterns also requires a go list call, since it's the equivalent of
	// ".".
	if len(restPatterns) > 0 || len(patterns) == 0 {
		dr, err := golistDriver(cfg, restPatterns...)
		if err != nil {
			return nil, err
		}
		response.init(dr)
	} else {
		response.init(&driverResponse{})
	}

	// Join with the sizes goroutine before touching its results.
	sizeswg.Wait()
	if sizeserr != nil {
		return nil, sizeserr
	}
	// types.SizesFor always returns nil or a *types.StdSizes
	response.dr.Sizes, _ = sizes.(*types.StdSizes)

	var containsCandidates []string

	if len(containFiles) != 0 {
		if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil {
			return nil, err
		}
	}

	if len(packagesNamed) != 0 {
		if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil {
			return nil, err
		}
	}

	// Apply file overlays; packages modified by or needed for the
	// overlays become candidates for the file= root checks below.
	modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response)
	if err != nil {
		return nil, err
	}
	if len(containFiles) > 0 {
		containsCandidates = append(containsCandidates, modifiedPkgs...)
		containsCandidates = append(containsCandidates, needPkgs...)
	}
	if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil {
		return nil, err
	}
	// Check candidate packages for containFiles.
	if len(containFiles) > 0 {
		for _, id := range containsCandidates {
			pkg, ok := response.seenPackages[id]
			if !ok {
				// A candidate that never showed up in the response is
				// surfaced as a package carrying a ListError.
				response.addPackage(&Package{
					ID: id,
					Errors: []Error{
						{
							Kind: ListError,
							Msg:  fmt.Sprintf("package %s expected but not seen", id),
						},
					},
				})
				continue
			}
			// Mark the package as a root if any of its GoFiles is one of
			// the requested contain files.
			for _, f := range containFiles {
				for _, g := range pkg.GoFiles {
					if sameFile(f, g) {
						response.addRoot(id)
					}
				}
			}
		}
	}

	return response.dr, nil
}
|
||||||
|
|
||||||
|
func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error {
|
||||||
|
if len(pkgs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
dr, err := driver(cfg, pkgs...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, pkg := range dr.Packages {
|
||||||
|
response.addPackage(pkg)
|
||||||
|
}
|
||||||
|
_, needPkgs, err := processGolistOverlay(cfg, response)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := addNeededOverlayPackages(cfg, driver, response, needPkgs); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
|
||||||
|
for _, query := range queries {
|
||||||
|
// TODO(matloob): Do only one query per directory.
|
||||||
|
fdir := filepath.Dir(query)
|
||||||
|
// Pass absolute path of directory to go list so that it knows to treat it as a directory,
|
||||||
|
// not a package path.
|
||||||
|
pattern, err := filepath.Abs(fdir)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
|
||||||
|
}
|
||||||
|
dirResponse, err := driver(cfg, pattern)
|
||||||
|
if err != nil || (len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1) {
|
||||||
|
// There was an error loading the package. Try to load the file as an ad-hoc package.
|
||||||
|
// Usually the error will appear in a returned package, but may not if we're in modules mode
|
||||||
|
// and the ad-hoc is located outside a module.
|
||||||
|
var queryErr error
|
||||||
|
dirResponse, queryErr = driver(cfg, query)
|
||||||
|
if queryErr != nil {
|
||||||
|
// Return the original error if the attempt to fall back failed.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
isRoot := make(map[string]bool, len(dirResponse.Roots))
|
||||||
|
for _, root := range dirResponse.Roots {
|
||||||
|
isRoot[root] = true
|
||||||
|
}
|
||||||
|
for _, pkg := range dirResponse.Packages {
|
||||||
|
// Add any new packages to the main set
|
||||||
|
// We don't bother to filter packages that will be dropped by the changes of roots,
|
||||||
|
// that will happen anyway during graph construction outside this function.
|
||||||
|
// Over-reporting packages is not a problem.
|
||||||
|
response.addPackage(pkg)
|
||||||
|
// if the package was not a root one, it cannot have the file
|
||||||
|
if !isRoot[pkg.ID] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, pkgFile := range pkg.GoFiles {
|
||||||
|
if filepath.Base(query) == filepath.Base(pkgFile) {
|
||||||
|
response.addRoot(pkg.ID)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// modCacheRegexp splits a path in a module cache into module, module version, and package.
// Group 1 is everything before '@', group 2 the version (up to the next
// path separator), and group 3 the remaining package path.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
|
||||||
|
|
||||||
|
func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
|
||||||
|
// calling `go env` isn't free; bail out if there's nothing to do.
|
||||||
|
if len(queries) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Determine which directories are relevant to scan.
|
||||||
|
roots, modRoot, err := roots(cfg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan the selected directories. Simple matches, from GOPATH/GOROOT
|
||||||
|
// or the local module, can simply be "go list"ed. Matches from the
|
||||||
|
// module cache need special treatment.
|
||||||
|
var matchesMu sync.Mutex
|
||||||
|
var simpleMatches, modCacheMatches []string
|
||||||
|
add := func(root gopathwalk.Root, dir string) {
|
||||||
|
// Walk calls this concurrently; protect the result slices.
|
||||||
|
matchesMu.Lock()
|
||||||
|
defer matchesMu.Unlock()
|
||||||
|
|
||||||
|
path := dir
|
||||||
|
if dir != root.Path {
|
||||||
|
path = dir[len(root.Path)+1:]
|
||||||
|
}
|
||||||
|
if pathMatchesQueries(path, queries) {
|
||||||
|
switch root.Type {
|
||||||
|
case gopathwalk.RootModuleCache:
|
||||||
|
modCacheMatches = append(modCacheMatches, path)
|
||||||
|
case gopathwalk.RootCurrentModule:
|
||||||
|
// We'd need to read go.mod to find the full
|
||||||
|
// import path. Relative's easier.
|
||||||
|
rel, err := filepath.Rel(cfg.Dir, dir)
|
||||||
|
if err != nil {
|
||||||
|
// This ought to be impossible, since
|
||||||
|
// we found dir in the current module.
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
simpleMatches = append(simpleMatches, "./"+rel)
|
||||||
|
case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT:
|
||||||
|
simpleMatches = append(simpleMatches, path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
startWalk := time.Now()
|
||||||
|
gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
|
||||||
|
if debug {
|
||||||
|
log.Printf("%v for walk", time.Since(startWalk))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Weird special case: the top-level package in a module will be in
|
||||||
|
// whatever directory the user checked the repository out into. It's
|
||||||
|
// more reasonable for that to not match the package name. So, if there
|
||||||
|
// are any Go files in the mod root, query it just to be safe.
|
||||||
|
if modRoot != "" {
|
||||||
|
rel, err := filepath.Rel(cfg.Dir, modRoot)
|
||||||
|
if err != nil {
|
||||||
|
panic(err) // See above.
|
||||||
|
}
|
||||||
|
|
||||||
|
files, err := ioutil.ReadDir(modRoot)
|
||||||
|
for _, f := range files {
|
||||||
|
if strings.HasSuffix(f.Name(), ".go") {
|
||||||
|
simpleMatches = append(simpleMatches, rel)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
addResponse := func(r *driverResponse) {
|
||||||
|
for _, pkg := range r.Packages {
|
||||||
|
response.addPackage(pkg)
|
||||||
|
for _, name := range queries {
|
||||||
|
if pkg.Name == name {
|
||||||
|
response.addRoot(pkg.ID)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(simpleMatches) != 0 {
|
||||||
|
resp, err := driver(cfg, simpleMatches...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
addResponse(resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Module cache matches are tricky. We want to avoid downloading new
|
||||||
|
// versions of things, so we need to use the ones present in the cache.
|
||||||
|
// go list doesn't accept version specifiers, so we have to write out a
|
||||||
|
// temporary module, and do the list in that module.
|
||||||
|
if len(modCacheMatches) != 0 {
|
||||||
|
// Collect all the matches, deduplicating by major version
|
||||||
|
// and preferring the newest.
|
||||||
|
type modInfo struct {
|
||||||
|
mod string
|
||||||
|
major string
|
||||||
|
}
|
||||||
|
mods := make(map[modInfo]string)
|
||||||
|
var imports []string
|
||||||
|
for _, modPath := range modCacheMatches {
|
||||||
|
matches := modCacheRegexp.FindStringSubmatch(modPath)
|
||||||
|
mod, ver := filepath.ToSlash(matches[1]), matches[2]
|
||||||
|
importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3]))
|
||||||
|
|
||||||
|
major := semver.Major(ver)
|
||||||
|
if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 {
|
||||||
|
mods[modInfo{mod, major}] = ver
|
||||||
|
}
|
||||||
|
|
||||||
|
imports = append(imports, importPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the temporary module.
|
||||||
|
var gomod bytes.Buffer
|
||||||
|
gomod.WriteString("module modquery\nrequire (\n")
|
||||||
|
for mod, version := range mods {
|
||||||
|
gomod.WriteString("\t" + mod.mod + " " + version + "\n")
|
||||||
|
}
|
||||||
|
gomod.WriteString(")\n")
|
||||||
|
|
||||||
|
tmpCfg := *cfg
|
||||||
|
|
||||||
|
// We're only trying to look at stuff in the module cache, so
|
||||||
|
// disable the network. This should speed things up, and has
|
||||||
|
// prevented errors in at least one case, #28518.
|
||||||
|
tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...))
|
||||||
|
|
||||||
|
var err error
|
||||||
|
tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpCfg.Dir)
|
||||||
|
|
||||||
|
if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil {
|
||||||
|
return fmt.Errorf("writing go.mod for module cache query: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run the query, using the import paths calculated from the matches above.
|
||||||
|
resp, err := driver(&tmpCfg, imports...)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("querying module cache matches: %v", err)
|
||||||
|
}
|
||||||
|
addResponse(resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSizes returns the types.Sizes to use when type checking, as reported by
// the go list driver for cfg's build flags, environment, and directory.
// Export-data mode is forwarded so the driver can pick the matching sizes.
func getSizes(cfg *Config) (types.Sizes, error) {
	return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg))
}
|
||||||
|
|
||||||
|
// roots selects the appropriate paths to walk based on the passed-in configuration,
|
||||||
|
// particularly the environment and the presence of a go.mod in cfg.Dir's parents.
|
||||||
|
func roots(cfg *Config) ([]gopathwalk.Root, string, error) {
|
||||||
|
stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD")
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
fields := strings.Split(stdout.String(), "\n")
|
||||||
|
if len(fields) != 4 || len(fields[3]) != 0 {
|
||||||
|
return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String())
|
||||||
|
}
|
||||||
|
goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2]
|
||||||
|
var modDir string
|
||||||
|
if gomod != "" {
|
||||||
|
modDir = filepath.Dir(gomod)
|
||||||
|
}
|
||||||
|
|
||||||
|
var roots []gopathwalk.Root
|
||||||
|
// Always add GOROOT.
|
||||||
|
roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT})
|
||||||
|
// If modules are enabled, scan the module dir.
|
||||||
|
if modDir != "" {
|
||||||
|
roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule})
|
||||||
|
}
|
||||||
|
// Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
|
||||||
|
for _, p := range gopath {
|
||||||
|
if modDir != "" {
|
||||||
|
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
|
||||||
|
} else {
|
||||||
|
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return roots, modDir, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// These functions were copied from goimports. See further documentation there.
|
||||||
|
|
||||||
|
// pathMatchesQueries is adapted from pkgIsCandidate.
|
||||||
|
// TODO: is it reasonable to do Contains here, rather than an exact match on a path component?
|
||||||
|
func pathMatchesQueries(path string, queries []string) bool {
|
||||||
|
lastTwo := lastTwoComponents(path)
|
||||||
|
for _, query := range queries {
|
||||||
|
if strings.Contains(lastTwo, query) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) {
|
||||||
|
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
|
||||||
|
if strings.Contains(lastTwo, query) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// lastTwoComponents returns at most the last two path components
// of v, using either / or \ as the path separator. If v has fewer
// than two separators, v is returned unchanged.
func lastTwoComponents(v string) string {
	seen := 0
	// Scan backwards so we can stop at the second separator from the end.
	for i := len(v) - 1; i >= 0; i-- {
		c := v[i]
		if c != '/' && c != '\\' {
			continue
		}
		seen++
		if seen == 2 {
			return v[i:]
		}
	}
	return v
}
|
||||||
|
|
||||||
|
// hasHyphenOrUpperASCII reports whether s contains a '-' or any
// ASCII uppercase letter (bytes 'A'..'Z').
func hasHyphenOrUpperASCII(s string) bool {
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c == '-':
			return true
		case 'A' <= c && c <= 'Z':
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// lowerASCIIAndRemoveHyphen returns s with every ASCII uppercase
// letter lowercased and every '-' removed. Non-ASCII bytes pass
// through unchanged.
func lowerASCIIAndRemoveHyphen(s string) (ret string) {
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == '-' {
			continue // drop hyphens entirely
		}
		if 'A' <= c && c <= 'Z' {
			c += 'a' - 'A' // ASCII-lower the letter
		}
		out = append(out, c)
	}
	return string(out)
}
|
||||||
|
|
||||||
|
// Fields must match go list;
// see $GOROOT/src/cmd/go/internal/load/pkg.go.
//
// jsonPackage is the subset of the `go list -json` record that this driver
// decodes. Field names must stay byte-identical to cmd/go's JSON output.
type jsonPackage struct {
	ImportPath      string   // package ID as printed by go list (may include " [q.test]" suffixes)
	Dir             string   // directory containing the package sources
	Name            string   // package name as declared in source
	Export          string   // path to export data file, if -export was used
	GoFiles         []string
	CompiledGoFiles []string // may include cgo-generated files; see the issue 28749 workaround in golistDriver
	CFiles          []string
	CgoFiles        []string
	CXXFiles        []string
	MFiles          []string
	HFiles          []string
	FFiles          []string
	SFiles          []string
	SwigFiles       []string
	SwigCXXFiles    []string
	SysoFiles       []string
	Imports         []string          // import IDs
	ImportMap       map[string]string // import path -> ID, recorded only where they differ
	Deps            []string
	TestGoFiles     []string
	TestImports     []string
	XTestGoFiles    []string
	XTestImports    []string
	ForTest         string // q in a "p [q.test]" package, else ""
	DepOnly         bool   // true if the package appears only as a dependency, not as a root

	Error *jsonPackageError // non-nil when go list reports an error for this package
}
|
||||||
|
|
||||||
|
// jsonPackageError mirrors the Error sub-record of `go list -json` output.
type jsonPackageError struct {
	ImportStack []string // import chain leading to the error
	Pos         string   // position of the error, e.g. "file:line:col", may be ""
	Err         string   // error message text
}
|
||||||
|
|
||||||
|
func otherFiles(p *jsonPackage) [][]string {
|
||||||
|
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
|
||||||
|
}
|
||||||
|
|
||||||
|
// golistDriver uses the "go list" command to expand the pattern
// words and return metadata for the specified packages. dir may be
// "" and env may be nil, as per os/exec.Command.
func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
	// go list uses the following identifiers in ImportPath and Imports:
	//
	// "p" -- importable package or main (command)
	// "q.test" -- q's test executable
	// "p [q.test]" -- variant of p as built for q's test executable
	// "q_test [q.test]" -- q's external test package
	//
	// The packages p that are built differently for a test q.test
	// are q itself, plus any helpers used by the external test q_test,
	// typically including "testing" and all its dependencies.

	// Run "go list" for complete
	// information on the specified packages.
	buf, err := invokeGo(cfg, golistargs(cfg, words)...)
	if err != nil {
		return nil, err
	}
	// seen guards against go list emitting the same ImportPath twice.
	seen := make(map[string]*jsonPackage)
	// Decode the JSON and convert it to Package form.
	var response driverResponse
	for dec := json.NewDecoder(buf); dec.More(); {
		p := new(jsonPackage)
		if err := dec.Decode(p); err != nil {
			return nil, fmt.Errorf("JSON decoding failed: %v", err)
		}

		if p.ImportPath == "" {
			// The documentation for go list says that “[e]rroneous packages will have
			// a non-empty ImportPath”. If for some reason it comes back empty, we
			// prefer to error out rather than silently discarding data or handing
			// back a package without any way to refer to it.
			if p.Error != nil {
				return nil, Error{
					Pos: p.Error.Pos,
					Msg: p.Error.Err,
				}
			}
			return nil, fmt.Errorf("package missing import path: %+v", p)
		}

		if old, found := seen[p.ImportPath]; found {
			if !reflect.DeepEqual(p, old) {
				return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
			}
			// skip the duplicate
			continue
		}
		seen[p.ImportPath] = p

		pkg := &Package{
			Name:            p.Name,
			ID:              p.ImportPath,
			GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles),
			CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
			OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
		}

		// Work around https://golang.org/issue/28749:
		// cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
		// Filter out any elements of CompiledGoFiles that are also in OtherFiles.
		// We have to keep this workaround in place until go1.12 is a distant memory.
		if len(pkg.OtherFiles) > 0 {
			other := make(map[string]bool, len(pkg.OtherFiles))
			for _, f := range pkg.OtherFiles {
				other[f] = true
			}

			// Filter in place, reusing CompiledGoFiles' backing array.
			out := pkg.CompiledGoFiles[:0]
			for _, f := range pkg.CompiledGoFiles {
				if other[f] {
					continue
				}
				out = append(out, f)
			}
			pkg.CompiledGoFiles = out
		}

		// Extract the PkgPath from the package's ID.
		// The ID may carry a " [q.test]" suffix; the path is everything before the space.
		if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
			pkg.PkgPath = pkg.ID[:i]
		} else {
			pkg.PkgPath = pkg.ID
		}

		if pkg.PkgPath == "unsafe" {
			pkg.GoFiles = nil // ignore fake unsafe.go file
		}

		// Assume go list emits only absolute paths for Dir.
		if p.Dir != "" && !filepath.IsAbs(p.Dir) {
			log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
		}

		// Export may be relative to the package dir; absolutize it.
		if p.Export != "" && !filepath.IsAbs(p.Export) {
			pkg.ExportFile = filepath.Join(p.Dir, p.Export)
		} else {
			pkg.ExportFile = p.Export
		}

		// imports
		//
		// Imports contains the IDs of all imported packages.
		// ImportsMap records (path, ID) only where they differ.
		ids := make(map[string]bool)
		for _, id := range p.Imports {
			ids[id] = true
		}
		pkg.Imports = make(map[string]*Package)
		for path, id := range p.ImportMap {
			pkg.Imports[path] = &Package{ID: id} // non-identity import
			delete(ids, id)
		}
		for id := range ids {
			if id == "C" {
				continue
			}

			pkg.Imports[id] = &Package{ID: id} // identity import
		}
		if !p.DepOnly {
			response.Roots = append(response.Roots, pkg.ID)
		}

		// Work around for pre-go.1.11 versions of go list.
		// TODO(matloob): they should be handled by the fallback.
		// Can we delete this?
		if len(pkg.CompiledGoFiles) == 0 {
			pkg.CompiledGoFiles = pkg.GoFiles
		}

		if p.Error != nil {
			pkg.Errors = append(pkg.Errors, Error{
				Pos: p.Error.Pos,
				Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363.
			})
		}

		response.Packages = append(response.Packages, pkg)
	}

	return &response, nil
}
|
||||||
|
|
||||||
|
// absJoin absolutizes and flattens the lists of files.
// Relative entries are joined onto dir; absolute entries pass through as-is.
func absJoin(dir string, fileses ...[]string) (res []string) {
	for _, group := range fileses {
		for _, f := range group {
			if filepath.IsAbs(f) {
				res = append(res, f)
				continue
			}
			res = append(res, filepath.Join(dir, f))
		}
	}
	return res
}
|
||||||
|
|
||||||
|
func golistargs(cfg *Config, words []string) []string {
|
||||||
|
const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
|
||||||
|
fullargs := []string{
|
||||||
|
"list", "-e", "-json",
|
||||||
|
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
|
||||||
|
fmt.Sprintf("-test=%t", cfg.Tests),
|
||||||
|
fmt.Sprintf("-export=%t", usesExportData(cfg)),
|
||||||
|
fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0),
|
||||||
|
// go list doesn't let you pass -test and -find together,
|
||||||
|
// probably because you'd just get the TestMain.
|
||||||
|
fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
|
||||||
|
}
|
||||||
|
fullargs = append(fullargs, cfg.BuildFlags...)
|
||||||
|
fullargs = append(fullargs, "--")
|
||||||
|
fullargs = append(fullargs, words...)
|
||||||
|
return fullargs
|
||||||
|
}
|
||||||
|
|
||||||
|
// invokeGo returns the stdout of a go command invocation.
// On failure it applies a series of workarounds that translate well-known
// go list error modes into synthetic JSON records or typed errors, so the
// caller can keep decoding instead of aborting.
func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
	stdout := new(bytes.Buffer)
	stderr := new(bytes.Buffer)
	cmd := exec.CommandContext(cfg.Context, "go", args...)
	// On darwin the cwd gets resolved to the real path, which breaks anything that
	// expects the working directory to keep the original path, including the
	// go command when dealing with modules.
	// The Go stdlib has a special feature where if the cwd and the PWD are the
	// same node then it trusts the PWD, so by setting it in the env for the child
	// process we fix up all the paths returned by the go command.
	cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir)
	cmd.Dir = cfg.Dir
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	if debug {
		// Log elapsed time plus the full command line and stderr on exit.
		defer func(start time.Time) {
			log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
		}(time.Now())
	}

	if err := cmd.Run(); err != nil {
		// Check for 'go' executable not being found.
		if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
			return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
		}

		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			// Catastrophic error:
			// - context cancellation
			return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
		}

		// Old go version?
		if strings.Contains(stderr.String(), "flag provided but not defined") {
			return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
		}

		// This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
		// the error in the Err section of stdout in case -e option is provided.
		// This fix is provided for backwards compatibility.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
			// Synthesize a JSON record carrying the stderr text so decoding proceeds.
			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
		// status if there's a dependency on a package that doesn't exist. But it should return
		// a zero exit status and set an error on that package.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
			// try to extract package name from string
			stderrStr := stderr.String()
			var importPath string
			colon := strings.Index(stderrStr, ":")
			if colon > 0 && strings.HasPrefix(stderrStr, "go build ") {
				importPath = stderrStr[len("go build "):colon]
			}
			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				importPath, strings.Trim(stderrStr, "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Export mode entails a build.
		// If that build fails, errors appear on stderr
		// (despite the -e flag) and the Export field is blank.
		// Do not fail in that case.
		// The same is true if an ad-hoc package given to go list doesn't exist.
		// TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
		// packages don't exist or a build fails.
		if !usesExportData(cfg) && !containsGoFile(args) {
			return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
		}
	}

	// As of writing, go list -export prints some non-fatal compilation
	// errors to stderr, even with -e set. We would prefer that it put
	// them in the Package.Error JSON (see https://golang.org/issue/26319).
	// In the meantime, there's nowhere good to put them, but they can
	// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
	// is set.
	if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
		fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr)
	}

	// debugging
	if false {
		fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout)
	}

	return stdout, nil
}
|
||||||
|
|
||||||
|
// containsGoFile reports whether any element of s ends in ".go".
func containsGoFile(s []string) bool {
	// Order doesn't matter for a membership test; scan from the end.
	for i := len(s) - 1; i >= 0; i-- {
		if strings.HasSuffix(s[i], ".go") {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
func cmdDebugStr(cmd *exec.Cmd, args ...string) string {
|
||||||
|
env := make(map[string]string)
|
||||||
|
for _, kv := range cmd.Env {
|
||||||
|
split := strings.Split(kv, "=")
|
||||||
|
k, v := split[0], split[1]
|
||||||
|
env[k] = v
|
||||||
|
}
|
||||||
|
var quotedArgs []string
|
||||||
|
for _, arg := range args {
|
||||||
|
quotedArgs = append(quotedArgs, strconv.Quote(arg))
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " "))
|
||||||
|
}
|
|
@ -0,0 +1,300 @@
|
||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// processGolistOverlay provides rudimentary support for adding
// files that don't exist on disk to an overlay. The results can be
// sometimes incorrect.
// It returns the IDs of packages it modified, the package paths it discovered
// are needed but missing from the response, and any error from import parsing.
// TODO(matloob): Handle unsupported cases, including the following:
// - determining the correct package to add given a new import path
func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) {
	havePkgs := make(map[string]string) // importPath -> non-test package ID
	needPkgsSet := make(map[string]bool)
	modifiedPkgsSet := make(map[string]bool)

	for _, pkg := range response.dr.Packages {
		// This is an approximation of import path to id. This can be
		// wrong for tests, vendored packages, and a number of other cases.
		havePkgs[pkg.PkgPath] = pkg.ID
	}

	// rootDirs is computed lazily (only if an overlay file introduces a
	// brand-new package) because it shells out to the go command.
	var rootDirs map[string]string
	var onceGetRootDirs sync.Once

	// If no new imports are added, it is safe to avoid loading any needPkgs.
	// Otherwise, it's hard to tell which package is actually being loaded
	// (due to vendoring) and whether any modified package will show up
	// in the transitive set of dependencies (because new imports are added,
	// potentially modifying the transitive set of dependencies).
	var overlayAddsImports bool

	for opath, contents := range cfg.Overlay {
		base := filepath.Base(opath)
		dir := filepath.Dir(opath)
		var pkg *Package
		var testVariantOf *Package // if opath is a test file, this is the package it is testing
		var fileExists bool
		isTest := strings.HasSuffix(opath, "_test.go")
		pkgName, ok := extractPackageName(opath, contents)
		if !ok {
			// Don't bother adding a file that doesn't even have a parsable package statement
			// to the overlay.
			continue
		}
		// First, try to match the overlay file to a package already in the
		// response, by package name and directory.
	nextPackage:
		for _, p := range response.dr.Packages {
			if pkgName != p.Name {
				continue
			}
			for _, f := range p.GoFiles {
				if !sameFile(filepath.Dir(f), dir) {
					continue
				}
				if isTest && !hasTestFiles(p) {
					// TODO(matloob): Are there packages other than the 'production' variant
					// of a package that this can match? This shouldn't match the test main package
					// because the file is generated in another directory.
					testVariantOf = p
					continue nextPackage
				}
				pkg = p
				if filepath.Base(f) == base {
					fileExists = true
				}
			}
		}
		// The overlay could have included an entirely new package.
		if pkg == nil {
			onceGetRootDirs.Do(func() {
				rootDirs = determineRootDirs(cfg)
			})
			// Try to find the module or gopath dir the file is contained in.
			// Then for modules, add the module opath to the beginning.
			var pkgPath string
			for rdir, rpath := range rootDirs {
				// TODO(matloob): This doesn't properly handle symlinks.
				r, err := filepath.Rel(rdir, dir)
				if err != nil {
					continue
				}
				pkgPath = filepath.ToSlash(r)
				if rpath != "" {
					pkgPath = path.Join(rpath, pkgPath)
				}
				// We only create one new package even it can belong in multiple modules or GOPATH entries.
				// This is okay because tools (such as the LSP) that use overlays will recompute the overlay
				// once the file is saved, and golist will do the right thing.
				// TODO(matloob): Implement module tiebreaking?
				break
			}
			if pkgPath == "" {
				continue
			}
			isXTest := strings.HasSuffix(pkgName, "_test")
			if isXTest {
				pkgPath += "_test"
			}
			id := pkgPath
			if isTest && !isXTest {
				// Internal test packages get the "p [p.test]" ID form.
				id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
			}
			// Try to reclaim a package with the same id if it exists in the response.
			for _, p := range response.dr.Packages {
				if reclaimPackage(p, id, opath, contents) {
					pkg = p
					break
				}
			}
			// Otherwise, create a new package
			if pkg == nil {
				pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)}
				response.addPackage(pkg)
				havePkgs[pkg.PkgPath] = id
				// Add the production package's sources for a test variant.
				if isTest && !isXTest && testVariantOf != nil {
					pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
					pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
				}
			}
		}
		if !fileExists {
			pkg.GoFiles = append(pkg.GoFiles, opath)
			// TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior
			// if the file will be ignored due to its build tags.
			pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath)
			modifiedPkgsSet[pkg.ID] = true
		}
		imports, err := extractImports(opath, contents)
		if err != nil {
			// Let the parser or type checker report errors later.
			continue
		}
		for _, imp := range imports {
			_, found := pkg.Imports[imp]
			if !found {
				overlayAddsImports = true
				// TODO(matloob): Handle cases when the following block isn't correct.
				// These include imports of test variants, imports of vendored packages, etc.
				id, ok := havePkgs[imp]
				if !ok {
					id = imp
				}
				pkg.Imports[imp] = &Package{ID: id}
			}
		}
		continue
	}

	// toPkgPath tries to guess the package path given the id.
	// This isn't always correct -- it's certainly wrong for
	// vendored packages' paths.
	toPkgPath := func(id string) string {
		// TODO(matloob): Handle vendor paths.
		i := strings.IndexByte(id, ' ')
		if i >= 0 {
			return id[:i]
		}
		return id
	}

	// Do another pass now that new packages have been created to determine the
	// set of missing packages.
	for _, pkg := range response.dr.Packages {
		for _, imp := range pkg.Imports {
			pkgPath := toPkgPath(imp.ID)
			if _, ok := havePkgs[pkgPath]; !ok {
				needPkgsSet[pkgPath] = true
			}
		}
	}

	if overlayAddsImports {
		needPkgs = make([]string, 0, len(needPkgsSet))
		for pkg := range needPkgsSet {
			needPkgs = append(needPkgs, pkg)
		}
	}
	modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
	for pkg := range modifiedPkgsSet {
		modifiedPkgs = append(modifiedPkgs, pkg)
	}
	return modifiedPkgs, needPkgs, err
}
|
||||||
|
|
||||||
|
func hasTestFiles(p *Package) bool {
|
||||||
|
for _, f := range p.GoFiles {
|
||||||
|
if strings.HasSuffix(f, "_test.go") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// determineRootDirs returns a mapping from directories code can be contained in to the
// corresponding import path prefixes of those directories.
// Its result is used to try to determine the import path for a package containing
// an overlay file.
// Failures are deliberately soft: if module information can't be obtained we
// fall back to GOPATH, and a decode failure yields whatever was collected so far.
func determineRootDirs(cfg *Config) map[string]string {
	// Assume modules first:
	out, err := invokeGo(cfg, "list", "-m", "-json", "all")
	if err != nil {
		return determineRootDirsGOPATH(cfg)
	}
	m := map[string]string{}
	// jsonMod is the subset of `go list -m -json` output we need.
	type jsonMod struct{ Path, Dir string }
	for dec := json.NewDecoder(out); dec.More(); {
		mod := new(jsonMod)
		if err := dec.Decode(mod); err != nil {
			return m // Give up and return an empty map. Package won't be found for overlay.
		}
		if mod.Dir != "" && mod.Path != "" {
			// This is a valid module; add it to the map.
			m[mod.Dir] = mod.Path
		}
	}
	return m
}
|
||||||
|
|
||||||
|
func determineRootDirsGOPATH(cfg *Config) map[string]string {
|
||||||
|
m := map[string]string{}
|
||||||
|
out, err := invokeGo(cfg, "env", "GOPATH")
|
||||||
|
if err != nil {
|
||||||
|
// Could not determine root dir mapping. Everything is best-effort, so just return an empty map.
|
||||||
|
// When we try to find the import path for a directory, there will be no root-dir match and
|
||||||
|
// we'll give up.
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) {
|
||||||
|
m[filepath.Join(p, "src")] = ""
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractImports parses the Go source in contents (attributed to filename
// for error messages) and returns the unquoted import paths it declares.
func extractImports(filename string, contents []byte) ([]string, error) {
	// Only the import block is needed, so stop parsing after it.
	file, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
	if err != nil {
		return nil, err
	}
	var paths []string
	for _, spec := range file.Imports {
		unquoted, uerr := strconv.Unquote(spec.Path.Value)
		if uerr != nil {
			return nil, uerr
		}
		paths = append(paths, unquoted)
	}
	return paths, nil
}
|
||||||
|
|
||||||
|
// reclaimPackage attempts to reuse a package that failed to load in an overlay.
|
||||||
|
//
|
||||||
|
// If the package has errors and has no Name, GoFiles, or Imports,
|
||||||
|
// then it's possible that it doesn't yet exist on disk.
|
||||||
|
func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool {
|
||||||
|
// TODO(rstambler): Check the message of the actual error?
|
||||||
|
// It differs between $GOPATH and module mode.
|
||||||
|
if pkg.ID != id {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(pkg.Errors) != 1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if pkg.Name != "" || pkg.ExportFile != "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(pkg.Imports) > 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
pkgName, ok := extractPackageName(filename, contents)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
pkg.Name = pkgName
|
||||||
|
pkg.Errors = nil
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractPackageName parses just the package clause of the given source and
// reports the declared package name; ok is false if parsing fails.
func extractPackageName(filename string, contents []byte) (string, bool) {
	// TODO(rstambler): Check the message of the actual error?
	// It differs between $GOPATH and module mode.
	src, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset?
	if err != nil {
		return "", false
	}
	return src.Name.Name, true
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,55 @@
|
||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Visit visits all the packages in the import graph whose roots are
|
||||||
|
// pkgs, calling the optional pre function the first time each package
|
||||||
|
// is encountered (preorder), and the optional post function after a
|
||||||
|
// package's dependencies have been visited (postorder).
|
||||||
|
// The boolean result of pre(pkg) determines whether
|
||||||
|
// the imports of package pkg are visited.
|
||||||
|
func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
|
||||||
|
seen := make(map[*Package]bool)
|
||||||
|
var visit func(*Package)
|
||||||
|
visit = func(pkg *Package) {
|
||||||
|
if !seen[pkg] {
|
||||||
|
seen[pkg] = true
|
||||||
|
|
||||||
|
if pre == nil || pre(pkg) {
|
||||||
|
paths := make([]string, 0, len(pkg.Imports))
|
||||||
|
for path := range pkg.Imports {
|
||||||
|
paths = append(paths, path)
|
||||||
|
}
|
||||||
|
sort.Strings(paths) // Imports is a map, this makes visit stable
|
||||||
|
for _, path := range paths {
|
||||||
|
visit(pkg.Imports[path])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if post != nil {
|
||||||
|
post(pkg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, pkg := range pkgs {
|
||||||
|
visit(pkg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintErrors prints to os.Stderr the accumulated errors of all
|
||||||
|
// packages in the import graph rooted at pkgs, dependencies first.
|
||||||
|
// PrintErrors returns the number of errors printed.
|
||||||
|
func PrintErrors(pkgs []*Package) int {
|
||||||
|
var n int
|
||||||
|
Visit(pkgs, nil, func(pkg *Package) {
|
||||||
|
for _, err := range pkg.Errors {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return n
|
||||||
|
}
|
|
@ -0,0 +1,196 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package fastwalk provides a faster version of filepath.Walk for file system
|
||||||
|
// scanning tools.
|
||||||
|
package fastwalk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TraverseLink is used as a return value from WalkFuncs to indicate that the
// symlink named in the call may be traversed.
var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")

// SkipFiles is used as a return value from WalkFuncs to indicate that the
// callback should not be called for any other files in the current directory.
// Child directories will still be traversed.
var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
|
||||||
|
|
||||||
|
// Walk is a faster implementation of filepath.Walk.
//
// filepath.Walk's design necessarily calls os.Lstat on each file,
// even if the caller needs less info.
// Many tools need only the type of each file.
// On some platforms, this information is provided directly by the readdir
// system call, avoiding the need to stat each file individually.
// fastwalk_unix.go contains a fork of the syscall routines.
//
// See golang.org/issue/16399
//
// Walk walks the file tree rooted at root, calling walkFn for
// each file or directory in the tree, including root.
//
// If fastWalk returns filepath.SkipDir, the directory is skipped.
//
// Unlike filepath.Walk:
//   * file stat calls must be done by the user.
//     The only provided metadata is the file type, which does not include
//     any permission bits.
//   * multiple goroutines stat the filesystem concurrently. The provided
//     walkFn must be safe for concurrent use.
//   * fastWalk can follow symlinks if walkFn returns the TraverseLink
//     sentinel error. It is the walkFn's responsibility to prevent
//     fastWalk from going into symlink cycles.
func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
	// TODO(bradfitz): make numWorkers configurable? We used a
	// minimum of 4 to give the kernel more info about multiple
	// things we want, in hopes its I/O scheduling can take
	// advantage of that. Hopefully most are in cache. Maybe 4 is
	// even too low of a minimum. Profile more.
	numWorkers := 4
	if n := runtime.NumCPU(); n > numWorkers {
		numWorkers = n
	}

	// Make sure to wait for all workers to finish, otherwise
	// walkFn could still be called after returning. This Wait call
	// runs after close(e.donec) below.
	var wg sync.WaitGroup
	defer wg.Wait()

	w := &walker{
		fn:       walkFn,
		enqueuec: make(chan walkItem, numWorkers), // buffered for performance
		workc:    make(chan walkItem, numWorkers), // buffered for performance
		donec:    make(chan struct{}),

		// buffered for correctness & not leaking goroutines:
		resc: make(chan error, numWorkers),
	}
	defer close(w.donec)

	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go w.doWork(&wg)
	}
	// todo is a LIFO stack of directories waiting to be dispatched; out
	// counts items handed to workers whose results are still outstanding.
	todo := []walkItem{{dir: root}}
	out := 0
	for {
		workc := w.workc
		var workItem walkItem
		if len(todo) == 0 {
			// A nil channel disables the send case below when there is
			// nothing to dispatch.
			workc = nil
		} else {
			workItem = todo[len(todo)-1]
		}
		select {
		case workc <- workItem:
			todo = todo[:len(todo)-1]
			out++
		case it := <-w.enqueuec:
			todo = append(todo, it)
		case err := <-w.resc:
			out--
			if err != nil {
				return err
			}
			if out == 0 && len(todo) == 0 {
				// It's safe to quit here, as long as the buffered
				// enqueue channel isn't also readable, which might
				// happen if the worker sends both another unit of
				// work and its result before the other select was
				// scheduled and both w.resc and w.enqueuec were
				// readable.
				select {
				case it := <-w.enqueuec:
					todo = append(todo, it)
				default:
					return nil
				}
			}
		}
	}
}
|
||||||
|
|
||||||
|
// doWork reads directories as instructed (via workc) and runs the
// user's callback function.
func (w *walker) doWork(wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case <-w.donec:
			return
		case it := <-w.workc:
			// Re-check donec before sending the result so the worker exits
			// promptly once Walk has returned and closed donec.
			select {
			case <-w.donec:
				return
			case w.resc <- w.walk(it.dir, !it.callbackDone):
			}
		}
	}
}
|
||||||
|
|
||||||
|
// walker holds the state shared between Walk's dispatch loop and its
// worker goroutines.
type walker struct {
	fn func(path string, typ os.FileMode) error // user callback

	donec    chan struct{} // closed on fastWalk's return
	workc    chan walkItem // to workers
	enqueuec chan walkItem // from workers
	resc     chan error    // from workers
}

// walkItem is one unit of work: a directory to read.
type walkItem struct {
	dir          string
	callbackDone bool // callback already called; don't do it again
}

// enqueue hands a newly discovered directory back to the dispatch loop,
// giving up if the walk has already finished (donec closed).
func (w *walker) enqueue(it walkItem) {
	select {
	case w.enqueuec <- it:
	case <-w.donec:
	}
}
|
||||||
|
|
||||||
|
func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
|
||||||
|
joined := dirName + string(os.PathSeparator) + baseName
|
||||||
|
if typ == os.ModeDir {
|
||||||
|
w.enqueue(walkItem{dir: joined})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err := w.fn(joined, typ)
|
||||||
|
if typ == os.ModeSymlink {
|
||||||
|
if err == TraverseLink {
|
||||||
|
// Set callbackDone so we don't call it twice for both the
|
||||||
|
// symlink-as-symlink and the symlink-as-directory later:
|
||||||
|
w.enqueue(walkItem{dir: joined, callbackDone: true})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err == filepath.SkipDir {
|
||||||
|
// Permit SkipDir on symlinks too.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *walker) walk(root string, runUserCallback bool) error {
|
||||||
|
if runUserCallback {
|
||||||
|
err := w.fn(root, os.ModeDir)
|
||||||
|
if err == filepath.SkipDir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return readDir(root, w.onDirEnt)
|
||||||
|
}
|
13
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
generated
vendored
Normal file
13
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build freebsd openbsd netbsd
|
||||||
|
|
||||||
|
package fastwalk
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
// direntInode returns the inode number of dirent.
// On these BSDs the field is named Fileno rather than Ino.
func direntInode(dirent *syscall.Dirent) uint64 {
	return uint64(dirent.Fileno)
}
|
14
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
generated
vendored
Normal file
14
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux darwin
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package fastwalk
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
// direntInode returns the inode number of dirent (the Ino field on
// Linux and Darwin).
func direntInode(dirent *syscall.Dirent) uint64 {
	return uint64(dirent.Ino)
}
|
13
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
generated
vendored
Normal file
13
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build darwin freebsd openbsd netbsd
|
||||||
|
|
||||||
|
package fastwalk
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
// direntNamlen returns the length of the entry name; the BSDs expose it
// directly via the Namlen field.
func direntNamlen(dirent *syscall.Dirent) uint64 {
	return uint64(dirent.Namlen)
}
|
29
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
generated
vendored
Normal file
29
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package fastwalk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// direntNamlen returns the length of the entry name in dirent.
// Linux's Dirent has no Namlen field, so the length is recovered by
// scanning Name for its NUL terminator, bounded by Reclen.
func direntNamlen(dirent *syscall.Dirent) uint64 {
	// fixedHdr is the size of the dirent header before the name bytes.
	const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name))
	nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
	const nameBufLen = uint16(len(nameBuf))
	// The name occupies at most Reclen - fixedHdr bytes, capped to the
	// declared size of the Name array.
	limit := dirent.Reclen - fixedHdr
	if limit > nameBufLen {
		limit = nameBufLen
	}
	nameLen := bytes.IndexByte(nameBuf[:limit], 0)
	if nameLen < 0 {
		panic("failed to find terminating 0 byte in dirent")
	}
	return uint64(nameLen)
}
|
|
@ -0,0 +1,37 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd
|
||||||
|
|
||||||
|
package fastwalk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// readDir calls fn for each directory entry in dirName.
|
||||||
|
// It does not descend into directories or follow symlinks.
|
||||||
|
// If fn returns a non-nil error, readDir returns with that error
|
||||||
|
// immediately.
|
||||||
|
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
|
||||||
|
fis, err := ioutil.ReadDir(dirName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
skipFiles := false
|
||||||
|
for _, fi := range fis {
|
||||||
|
if fi.Mode().IsRegular() && skipFiles {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
|
||||||
|
if err == SkipFiles {
|
||||||
|
skipFiles = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,127 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux darwin freebsd openbsd netbsd
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package fastwalk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// blockSize is the size of the buffer handed to ReadDirent (8 KiB).
const blockSize = 8 << 10

// unknownFileMode is a sentinel (and bogus) os.FileMode
// value used to represent a syscall.DT_UNKNOWN Dirent.Type.
const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice
|
||||||
|
|
||||||
|
// readDir calls fn for each entry of dirName, reading raw dirent records
// via repeated ReadDirent syscalls instead of per-entry Lstat calls.
// It does not descend into directories or follow symlinks; a SkipFiles
// return from fn suppresses further callbacks for regular files.
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
	fd, err := syscall.Open(dirName, 0, 0)
	if err != nil {
		return &os.PathError{Op: "open", Path: dirName, Err: err}
	}
	defer syscall.Close(fd)

	// The buffer must be at least a block long.
	buf := make([]byte, blockSize) // stack-allocated; doesn't escape
	bufp := 0                      // starting read position in buf
	nbuf := 0                      // end valid data in buf
	skipFiles := false
	for {
		if bufp >= nbuf {
			// Buffer exhausted; refill from the kernel.
			bufp = 0
			nbuf, err = syscall.ReadDirent(fd, buf)
			if err != nil {
				return os.NewSyscallError("readdirent", err)
			}
			if nbuf <= 0 {
				return nil
			}
		}
		consumed, name, typ := parseDirEnt(buf[bufp:nbuf])
		bufp += consumed
		if name == "" || name == "." || name == ".." {
			continue
		}
		// Fallback for filesystems (like old XFS) that don't
		// support Dirent.Type and have DT_UNKNOWN (0) there
		// instead.
		if typ == unknownFileMode {
			fi, err := os.Lstat(dirName + "/" + name)
			if err != nil {
				// It got deleted in the meantime.
				if os.IsNotExist(err) {
					continue
				}
				return err
			}
			typ = fi.Mode() & os.ModeType
		}
		if skipFiles && typ.IsRegular() {
			continue
		}
		if err := fn(dirName, name, typ); err != nil {
			if err == SkipFiles {
				skipFiles = true
				continue
			}
			return err
		}
	}
}
|
||||||
|
|
||||||
|
// parseDirEnt decodes the first raw dirent record in buf, returning the
// number of bytes consumed, the entry name, and its file type mapped to
// an os.FileMode. An empty name (with consumed > 0) means the record
// should be skipped by the caller.
func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
	// golang.org/issue/15653
	dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
	if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
		panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
	}
	if len(buf) < int(dirent.Reclen) {
		panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen))
	}
	consumed = int(dirent.Reclen)
	if direntInode(dirent) == 0 { // File absent in directory.
		return
	}
	// Map the kernel DT_* type byte to an os.FileMode.
	switch dirent.Type {
	case syscall.DT_REG:
		typ = 0
	case syscall.DT_DIR:
		typ = os.ModeDir
	case syscall.DT_LNK:
		typ = os.ModeSymlink
	case syscall.DT_BLK:
		typ = os.ModeDevice
	case syscall.DT_FIFO:
		typ = os.ModeNamedPipe
	case syscall.DT_SOCK:
		typ = os.ModeSocket
	case syscall.DT_UNKNOWN:
		typ = unknownFileMode
	default:
		// Skip weird things.
		// It's probably a DT_WHT (http://lwn.net/Articles/325369/)
		// or something. Revisit if/when this package is moved outside
		// of goimports. goimports only cares about regular files,
		// symlinks, and directories.
		return
	}

	nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
	nameLen := direntNamlen(dirent)

	// Special cases for common things:
	if nameLen == 1 && nameBuf[0] == '.' {
		name = "."
	} else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' {
		name = ".."
	} else {
		name = string(nameBuf[:nameLen])
	}
	return
}
|
|
@ -0,0 +1,250 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package gopathwalk is like filepath.Walk but specialized for finding Go
|
||||||
|
// packages, particularly in $GOPATH and $GOROOT.
|
||||||
|
package gopathwalk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/build"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/tools/internal/fastwalk"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Options controls the behavior of a Walk call.
type Options struct {
	Debug          bool // Enable debug logging
	ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
}

// RootType indicates the type of a Root.
type RootType int

const (
	RootUnknown       RootType = iota // root of unclassified origin
	RootGOROOT                        // $GOROOT/src
	RootGOPATH                        // a $GOPATH/src entry
	RootCurrentModule                 // the main module's directory
	RootModuleCache                   // the module download cache
	RootOther                         // any other directory
)

// A Root is a starting point for a Walk.
type Root struct {
	Path string
	Type RootType
}
|
||||||
|
|
||||||
|
// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible.
|
||||||
|
func SrcDirsRoots(ctx *build.Context) []Root {
|
||||||
|
var roots []Root
|
||||||
|
roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT})
|
||||||
|
for _, p := range filepath.SplitList(ctx.GOPATH) {
|
||||||
|
roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH})
|
||||||
|
}
|
||||||
|
return roots
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
|
||||||
|
// For each package found, add will be called (concurrently) with the absolute
|
||||||
|
// paths of the containing source directory and the package directory.
|
||||||
|
// add will be called concurrently.
|
||||||
|
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
|
||||||
|
for _, root := range roots {
|
||||||
|
walkDir(root, add, opts)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func walkDir(root Root, add func(Root, string), opts Options) {
|
||||||
|
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
|
||||||
|
if opts.Debug {
|
||||||
|
log.Printf("skipping nonexistant directory: %v", root.Path)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if opts.Debug {
|
||||||
|
log.Printf("scanning %s", root.Path)
|
||||||
|
}
|
||||||
|
w := &walker{
|
||||||
|
root: root,
|
||||||
|
add: add,
|
||||||
|
opts: opts,
|
||||||
|
}
|
||||||
|
w.init()
|
||||||
|
if err := fastwalk.Walk(root.Path, w.walk); err != nil {
|
||||||
|
log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Debug {
|
||||||
|
log.Printf("scanned %s", root.Path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// walker is the callback for fastwalk.Walk.
type walker struct {
	root Root               // The source directory to scan.
	add  func(Root, string) // The callback that will be invoked for every possible Go package dir.
	opts Options            // Options passed to Walk by the user.

	ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
}
|
||||||
|
|
||||||
|
// init initializes the walker based on its Options.
|
||||||
|
func (w *walker) init() {
|
||||||
|
var ignoredPaths []string
|
||||||
|
if w.root.Type == RootModuleCache {
|
||||||
|
ignoredPaths = []string{"cache"}
|
||||||
|
}
|
||||||
|
if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
|
||||||
|
ignoredPaths = w.getIgnoredDirs(w.root.Path)
|
||||||
|
ignoredPaths = append(ignoredPaths, "v", "mod")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range ignoredPaths {
|
||||||
|
full := filepath.Join(w.root.Path, p)
|
||||||
|
if fi, err := os.Stat(full); err == nil {
|
||||||
|
w.ignoredDirs = append(w.ignoredDirs, fi)
|
||||||
|
if w.opts.Debug {
|
||||||
|
log.Printf("Directory added to ignore list: %s", full)
|
||||||
|
}
|
||||||
|
} else if w.opts.Debug {
|
||||||
|
log.Printf("Error statting ignored directory: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
|
||||||
|
// of relative directories to ignore when scanning for go files.
|
||||||
|
// The provided path is one of the $GOPATH entries with "src" appended.
|
||||||
|
func (w *walker) getIgnoredDirs(path string) []string {
|
||||||
|
file := filepath.Join(path, ".goimportsignore")
|
||||||
|
slurp, err := ioutil.ReadFile(file)
|
||||||
|
if w.opts.Debug {
|
||||||
|
if err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
} else {
|
||||||
|
log.Printf("Read %s", file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var ignoredDirs []string
|
||||||
|
bs := bufio.NewScanner(bytes.NewReader(slurp))
|
||||||
|
for bs.Scan() {
|
||||||
|
line := strings.TrimSpace(bs.Text())
|
||||||
|
if line == "" || strings.HasPrefix(line, "#") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ignoredDirs = append(ignoredDirs, line)
|
||||||
|
}
|
||||||
|
return ignoredDirs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *walker) shouldSkipDir(fi os.FileInfo) bool {
|
||||||
|
for _, ignoredDir := range w.ignoredDirs {
|
||||||
|
if os.SameFile(fi, ignoredDir) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// walk is the fastwalk callback. For regular .go files it records the
// containing directory via w.add; for directories it applies the skip
// rules; for symlinks it decides whether fastwalk should traverse them.
func (w *walker) walk(path string, typ os.FileMode) error {
	dir := filepath.Dir(path)
	if typ.IsRegular() {
		if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
			// Doesn't make sense to have regular files
			// directly in your $GOPATH/src or $GOROOT/src.
			return fastwalk.SkipFiles
		}
		if !strings.HasSuffix(path, ".go") {
			return nil
		}

		// One .go file is enough to mark the directory; skip the rest.
		w.add(w.root, dir)
		return fastwalk.SkipFiles
	}
	if typ == os.ModeDir {
		base := filepath.Base(path)
		// Skip hidden/underscore dirs, testdata, GOROOT vendor in module
		// mode, and node_modules in GOPATH mode.
		if base == "" || base[0] == '.' || base[0] == '_' ||
			base == "testdata" ||
			(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
			(!w.opts.ModulesEnabled && base == "node_modules") {
			return filepath.SkipDir
		}
		fi, err := os.Lstat(path)
		if err == nil && w.shouldSkipDir(fi) {
			return filepath.SkipDir
		}
		return nil
	}
	if typ == os.ModeSymlink {
		base := filepath.Base(path)
		if strings.HasPrefix(base, ".#") {
			// Emacs noise.
			return nil
		}
		fi, err := os.Lstat(path)
		if err != nil {
			// Just ignore it.
			return nil
		}
		if w.shouldTraverse(dir, fi) {
			return fastwalk.TraverseLink
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// shouldTraverse reports whether the symlink fi, found in dir,
// should be followed. It makes sure symlinks were never visited
// before to avoid symlink loops.
func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
	path := filepath.Join(dir, fi.Name())
	target, err := filepath.EvalSymlinks(path)
	if err != nil {
		// Broken or unresolvable symlink; don't follow.
		return false
	}
	ts, err := os.Stat(target)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return false
	}
	if !ts.IsDir() {
		// Only directory targets are worth traversing.
		return false
	}
	if w.shouldSkipDir(ts) {
		return false
	}
	// Check for symlink loops by statting each directory component
	// and seeing if any are the same file as ts.
	for {
		parent := filepath.Dir(path)
		if parent == path {
			// Made it to the root without seeing a cycle.
			// Use this symlink.
			return true
		}
		parentInfo, err := os.Stat(parent)
		if err != nil {
			return false
		}
		if os.SameFile(ts, parentInfo) {
			// Cycle. Don't traverse.
			return false
		}
		path = parent
	}

}
|
|
@ -0,0 +1,388 @@
|
||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package semver implements comparison of semantic version strings.
|
||||||
|
// In this package, semantic version strings must begin with a leading "v",
|
||||||
|
// as in "v1.0.0".
|
||||||
|
//
|
||||||
|
// The general form of a semantic version string accepted by this package is
|
||||||
|
//
|
||||||
|
// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
|
||||||
|
//
|
||||||
|
// where square brackets indicate optional parts of the syntax;
|
||||||
|
// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
|
||||||
|
// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
|
||||||
|
// using only alphanumeric characters and hyphens; and
|
||||||
|
// all-numeric PRERELEASE identifiers must not have leading zeros.
|
||||||
|
//
|
||||||
|
// This package follows Semantic Versioning 2.0.0 (see semver.org)
|
||||||
|
// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
|
||||||
|
// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
|
||||||
|
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
|
||||||
|
package semver
|
||||||
|
|
||||||
|
// parsed returns the parsed form of a semantic version string.
type parsed struct {
	major      string // decimal MAJOR component, without the leading "v"
	minor      string // decimal MINOR component
	patch      string // decimal PATCH component
	short      string // suffix to append to canonicalize a shorthand (e.g. ".0.0")
	prerelease string // "-PRERELEASE" suffix, including the hyphen, if any
	build      string // "+BUILD" suffix, including the plus, if any
	err        string // non-empty if parsing failed
}
|
||||||
|
|
||||||
|
// IsValid reports whether v is a valid semantic version string.
func IsValid(v string) bool {
	_, ok := parse(v)
	return ok
}
|
||||||
|
|
||||||
|
// Canonical returns the canonical formatting of the semantic version v.
|
||||||
|
// It fills in any missing .MINOR or .PATCH and discards build metadata.
|
||||||
|
// Two semantic versions compare equal only if their canonical formattings
|
||||||
|
// are identical strings.
|
||||||
|
// The canonical invalid semantic version is the empty string.
|
||||||
|
func Canonical(v string) string {
|
||||||
|
p, ok := parse(v)
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if p.build != "" {
|
||||||
|
return v[:len(v)-len(p.build)]
|
||||||
|
}
|
||||||
|
if p.short != "" {
|
||||||
|
return v + p.short
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Major returns the major version prefix of the semantic version v.
|
||||||
|
// For example, Major("v2.1.0") == "v2".
|
||||||
|
// If v is an invalid semantic version string, Major returns the empty string.
|
||||||
|
func Major(v string) string {
|
||||||
|
pv, ok := parse(v)
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return v[:1+len(pv.major)]
|
||||||
|
}
|
||||||
|
|
||||||
|
// MajorMinor returns the major.minor version prefix of the semantic version v.
|
||||||
|
// For example, MajorMinor("v2.1.0") == "v2.1".
|
||||||
|
// If v is an invalid semantic version string, MajorMinor returns the empty string.
|
||||||
|
func MajorMinor(v string) string {
|
||||||
|
pv, ok := parse(v)
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
i := 1 + len(pv.major)
|
||||||
|
if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
|
||||||
|
return v[:j]
|
||||||
|
}
|
||||||
|
return v[:i] + "." + pv.minor
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prerelease returns the prerelease suffix of the semantic version v.
|
||||||
|
// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
|
||||||
|
// If v is an invalid semantic version string, Prerelease returns the empty string.
|
||||||
|
func Prerelease(v string) string {
|
||||||
|
pv, ok := parse(v)
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return pv.prerelease
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build returns the build suffix of the semantic version v.
|
||||||
|
// For example, Build("v2.1.0+meta") == "+meta".
|
||||||
|
// If v is an invalid semantic version string, Build returns the empty string.
|
||||||
|
func Build(v string) string {
|
||||||
|
pv, ok := parse(v)
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return pv.build
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare returns an integer comparing two versions according to
|
||||||
|
// according to semantic version precedence.
|
||||||
|
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
|
||||||
|
//
|
||||||
|
// An invalid semantic version string is considered less than a valid one.
|
||||||
|
// All invalid semantic version strings compare equal to each other.
|
||||||
|
func Compare(v, w string) int {
|
||||||
|
pv, ok1 := parse(v)
|
||||||
|
pw, ok2 := parse(w)
|
||||||
|
if !ok1 && !ok2 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
if !ok1 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if !ok2 {
|
||||||
|
return +1
|
||||||
|
}
|
||||||
|
if c := compareInt(pv.major, pw.major); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
if c := compareInt(pv.minor, pw.minor); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
if c := compareInt(pv.patch, pw.patch); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return comparePrerelease(pv.prerelease, pw.prerelease)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max canonicalizes its arguments and then returns the version string
|
||||||
|
// that compares greater.
|
||||||
|
func Max(v, w string) string {
|
||||||
|
v = Canonical(v)
|
||||||
|
w = Canonical(w)
|
||||||
|
if Compare(v, w) > 0 {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse parses v into its components. On failure it returns ok == false
// with p.err describing the first problem encountered; on success every
// consumed component is recorded in p. Omitted MINOR/PATCH parts are
// filled in as "0", with p.short recording the suffix that Canonical
// must append.
func parse(v string) (p parsed, ok bool) {
	// The "v" prefix is mandatory in this package (a deliberate
	// deviation from semver.org).
	if v == "" || v[0] != 'v' {
		p.err = "missing v prefix"
		return
	}
	p.major, v, ok = parseInt(v[1:])
	if !ok {
		p.err = "bad major version"
		return
	}
	// "vMAJOR" shorthand: treat as vMAJOR.0.0.
	if v == "" {
		p.minor = "0"
		p.patch = "0"
		p.short = ".0.0"
		return
	}
	if v[0] != '.' {
		p.err = "bad minor prefix"
		ok = false
		return
	}
	p.minor, v, ok = parseInt(v[1:])
	if !ok {
		p.err = "bad minor version"
		return
	}
	// "vMAJOR.MINOR" shorthand: treat as vMAJOR.MINOR.0.
	if v == "" {
		p.patch = "0"
		p.short = ".0"
		return
	}
	if v[0] != '.' {
		p.err = "bad patch prefix"
		ok = false
		return
	}
	p.patch, v, ok = parseInt(v[1:])
	if !ok {
		p.err = "bad patch version"
		return
	}
	// Optional "-PRERELEASE" suffix (must precede any build metadata).
	if len(v) > 0 && v[0] == '-' {
		p.prerelease, v, ok = parsePrerelease(v)
		if !ok {
			p.err = "bad prerelease"
			return
		}
	}
	// Optional "+BUILD" suffix.
	if len(v) > 0 && v[0] == '+' {
		p.build, v, ok = parseBuild(v)
		if !ok {
			p.err = "bad build"
			return
		}
	}
	// Anything left over makes the whole version invalid.
	if v != "" {
		p.err = "junk on end"
		ok = false
		return
	}
	ok = true
	return
}
|
||||||
|
|
||||||
|
// parseInt parses a leading run of decimal digits from v, rejecting
// extra leading zeros. It returns the digits, the unconsumed remainder,
// and whether the parse succeeded.
func parseInt(v string) (t, rest string, ok bool) {
	if len(v) == 0 || v[0] < '0' || v[0] > '9' {
		return
	}
	n := 1
	for n < len(v) && v[n] >= '0' && v[n] <= '9' {
		n++
	}
	// "0" is fine, but "01", "007", ... are not.
	if v[0] == '0' && n > 1 {
		return
	}
	return v[:n], v[n:], true
}
|
||||||
|
|
||||||
|
func parsePrerelease(v string) (t, rest string, ok bool) {
|
||||||
|
// "A pre-release version MAY be denoted by appending a hyphen and
|
||||||
|
// a series of dot separated identifiers immediately following the patch version.
|
||||||
|
// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
|
||||||
|
// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
|
||||||
|
if v == "" || v[0] != '-' {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
i := 1
|
||||||
|
start := 1
|
||||||
|
for i < len(v) && v[i] != '+' {
|
||||||
|
if !isIdentChar(v[i]) && v[i] != '.' {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if v[i] == '.' {
|
||||||
|
if start == i || isBadNum(v[start:i]) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
start = i + 1
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if start == i || isBadNum(v[start:i]) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return v[:i], v[i:], true
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseBuild parses a "+BUILD" suffix from the front of v, consuming
// the rest of the string. Per SemVer, build metadata is a plus sign
// followed by a series of non-empty dot-separated identifiers drawn
// from [0-9A-Za-z-].
func parseBuild(v string) (t, rest string, ok bool) {
	if v == "" || v[0] != '+' {
		return
	}
	// identChar reports whether c may appear inside a build identifier.
	identChar := func(c byte) bool {
		return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
	}
	i := 1
	start := 1 // start of the identifier currently being scanned
	for i < len(v) {
		c := v[i]
		// Dots are valid here as identifier separators even though they
		// are not identifier characters. The previous code rejected any
		// '.' outright, which wrongly made dotted build metadata such as
		// "+meta.1" invalid and left its empty-identifier check dead.
		if !identChar(c) && c != '.' {
			return
		}
		if c == '.' {
			// Identifiers must not be empty.
			if start == i {
				return
			}
			start = i + 1
		}
		i++
	}
	// Final identifier must be non-empty (this also rejects a bare "+"
	// and a trailing dot).
	if start == i {
		return
	}
	return v[:i], v[i:], true
}
|
||||||
|
|
||||||
|
// isIdentChar reports whether c is legal in a prerelease or build
// identifier: an ASCII letter, digit, or hyphen ([0-9A-Za-z-]).
func isIdentChar(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z':
		return true
	case 'A' <= c && c <= 'Z':
		return true
	case '0' <= c && c <= '9':
		return true
	default:
		return c == '-'
	}
}
|
||||||
|
|
||||||
|
// isBadNum reports whether v is an all-numeric identifier carrying a
// forbidden leading zero (e.g. "01"). Identifiers containing any
// non-digit, the single digit "0", and the empty string are all fine.
func isBadNum(v string) bool {
	if len(v) < 2 || v[0] != '0' {
		return false
	}
	for j := 0; j < len(v); j++ {
		if v[j] < '0' || v[j] > '9' {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// isNum reports whether v consists entirely of decimal digits.
// The empty string vacuously counts as numeric.
func isNum(v string) bool {
	for j := 0; j < len(v); j++ {
		if v[j] < '0' || v[j] > '9' {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// compareInt compares x and y as decimal integers, assuming both are
// digit strings without extra leading zeros (so a longer string is
// always the larger number).
func compareInt(x, y string) int {
	switch {
	case x == y:
		return 0
	case len(x) != len(y):
		if len(x) < len(y) {
			return -1
		}
		return +1
	case x < y:
		return -1
	default:
		return +1
	}
}
|
||||||
|
|
||||||
|
// comparePrerelease compares two prerelease suffixes, each either empty
// or beginning with '-', per SemVer precedence. An empty suffix denotes
// a release version and sorts above any prerelease.
func comparePrerelease(x, y string) int {
	// "When major, minor, and patch are equal, a pre-release version has
	// lower precedence than a normal version.
	// Example: 1.0.0-alpha < 1.0.0.
	// Precedence for two pre-release versions with the same major, minor,
	// and patch version MUST be determined by comparing each dot separated
	// identifier from left to right until a difference is found as follows:
	// identifiers consisting of only digits are compared numerically and
	// identifiers with letters or hyphens are compared lexically in ASCII
	// sort order. Numeric identifiers always have lower precedence than
	// non-numeric identifiers. A larger set of pre-release fields has a
	// higher precedence than a smaller set, if all of the preceding
	// identifiers are equal.
	// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
	// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
	if x == y {
		return 0
	}
	// Empty (no prerelease) outranks any prerelease suffix.
	if x == "" {
		return +1
	}
	if y == "" {
		return -1
	}
	// Walk the dot-separated identifiers in lockstep until they differ.
	for x != "" && y != "" {
		x = x[1:] // skip - or .
		y = y[1:] // skip - or .
		var dx, dy string
		dx, x = nextIdent(x)
		dy, y = nextIdent(y)
		if dx != dy {
			ix := isNum(dx)
			iy := isNum(dy)
			// A numeric identifier sorts below a non-numeric one.
			if ix != iy {
				if ix {
					return -1
				} else {
					return +1
				}
			}
			// Both numeric: more digits means a larger number
			// (no leading zeros are allowed by parsePrerelease).
			if ix {
				if len(dx) < len(dy) {
					return -1
				}
				if len(dx) > len(dy) {
					return +1
				}
			}
			// Equal length or non-numeric: plain ASCII ordering.
			if dx < dy {
				return -1
			} else {
				return +1
			}
		}
	}
	// One suffix is a strict prefix of the other; the one with fewer
	// identifiers has lower precedence.
	if x == "" {
		return -1
	} else {
		return +1
	}
}
|
||||||
|
|
||||||
|
// nextIdent splits x at its first dot, returning the leading identifier
// and the remainder (which still begins with the dot, if there was one).
func nextIdent(x string) (dx, rest string) {
	for j := 0; j < len(x); j++ {
		if x[j] == '.' {
			return x[:j], x[j:]
		}
	}
	return x, ""
}
|
|
@ -169,6 +169,8 @@ github.com/gogo/protobuf/proto
|
||||||
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
|
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
|
||||||
# github.com/golang/mock v1.3.1
|
# github.com/golang/mock v1.3.1
|
||||||
github.com/golang/mock/gomock
|
github.com/golang/mock/gomock
|
||||||
|
github.com/golang/mock/mockgen
|
||||||
|
github.com/golang/mock/mockgen/model
|
||||||
# github.com/golang/protobuf v1.3.2
|
# github.com/golang/protobuf v1.3.2
|
||||||
github.com/golang/protobuf/proto
|
github.com/golang/protobuf/proto
|
||||||
github.com/golang/protobuf/ptypes
|
github.com/golang/protobuf/ptypes
|
||||||
|
@ -576,6 +578,17 @@ golang.org/x/text/runes
|
||||||
golang.org/x/text/internal/tag
|
golang.org/x/text/internal/tag
|
||||||
# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
|
# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
|
||||||
golang.org/x/time/rate
|
golang.org/x/time/rate
|
||||||
|
# golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0
|
||||||
|
golang.org/x/tools/cmd/cover
|
||||||
|
golang.org/x/tools/cmd/stringer
|
||||||
|
golang.org/x/tools/go/packages
|
||||||
|
golang.org/x/tools/cover
|
||||||
|
golang.org/x/tools/go/gcexportdata
|
||||||
|
golang.org/x/tools/go/internal/packagesdriver
|
||||||
|
golang.org/x/tools/internal/gopathwalk
|
||||||
|
golang.org/x/tools/internal/semver
|
||||||
|
golang.org/x/tools/go/internal/gcimporter
|
||||||
|
golang.org/x/tools/internal/fastwalk
|
||||||
# google.golang.org/api v0.9.0
|
# google.golang.org/api v0.9.0
|
||||||
google.golang.org/api/iterator
|
google.golang.org/api/iterator
|
||||||
google.golang.org/api/option
|
google.golang.org/api/option
|
||||||
|
|
Loading…
Reference in New Issue