
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 5375 additions and 12 deletions
@@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
-var Version = v{1, 1, 43}
+var Version = v{1, 1, 50}
// v holds the version of this library.
type v struct {
@@ -17,11 +17,22 @@ type Transfer struct {
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
+TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
TsigSecret map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
tsigTimersOnly bool
}
-// Think we need to away to stop the transfer
+func (t *Transfer) tsigProvider() TsigProvider {
+if t.TsigProvider != nil {
+return t.TsigProvider
+}
+if t.TsigSecret != nil {
+return tsigSecretProvider(t.TsigSecret)
+}
+return nil
+}
+// TODO: Think we need to away to stop the transfer
// In performs an incoming transfer with the server in a.
// If you would like to set the source IP, or some other attribute
@@ -224,12 +235,9 @@ func (t *Transfer) ReadMsg() (*Msg, error) {
if err := m.Unpack(p); err != nil {
return nil, err
}
-if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
-if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
-return m, ErrSecret
-}
+if ts, tp := m.IsTsig(), t.tsigProvider(); ts != nil && tp != nil {
// Need to work on the original message p, as that was used to calculate the tsig.
-err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
+err = TsigVerifyWithProvider(p, tp, t.tsigRequestMAC, t.tsigTimersOnly)
t.tsigRequestMAC = ts.MAC
}
return m, err
@@ -238,11 +246,8 @@ func (t *Transfer) ReadMsg() (*Msg, error) {
// WriteMsg writes a message through the transfer connection t.
func (t *Transfer) WriteMsg(m *Msg) (err error) {
var out []byte
-if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
-if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
-return ErrSecret
-}
-out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
+if ts, tp := m.IsTsig(), t.tsigProvider(); ts != nil && tp != nil {
+out, t.tsigRequestMAC, err = TsigGenerateWithProvider(m, tp, t.tsigRequestMAC, t.tsigTimersOnly)
} else {
out, err = m.Pack()
}
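The hunks above replace the hard-coded TsigSecret lookup with the tsigProvider() fallback, so a Transfer configured the old way keeps working while a custom TsigProvider now takes precedence. Below is a minimal, illustrative sketch of driving a signed AXFR through this API; the key name, secret and server address are placeholders, not taken from this repository.

package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	// Build an AXFR request for the zone and sign it with a TSIG key.
	m := new(dns.Msg)
	m.SetAxfr("example.org.")
	m.SetTsig("axfr-key.", dns.HmacSHA256, 300, time.Now().Unix())

	// TsigSecret still works after this change; setting TsigProvider instead
	// would take precedence, per tsigProvider() in the hunk above.
	t := &dns.Transfer{
		TsigSecret: map[string]string{"axfr-key.": "c2VjcmV0IGtleSBtYXRlcmlhbA=="},
	}

	env, err := t.In(m, "ns1.example.org:53")
	if err != nil {
		fmt.Println("transfer failed:", err)
		return
	}
	for e := range env {
		if e.Error != nil {
			fmt.Println("envelope error:", e.Error)
			break
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
}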
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semver implements comparison of semantic version strings.
// In this package, semantic version strings must begin with a leading "v",
// as in "v1.0.0".
//
// The general form of a semantic version string accepted by this package is
//
// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
//
// where square brackets indicate optional parts of the syntax;
// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
// using only alphanumeric characters and hyphens; and
// all-numeric PRERELEASE identifiers must not have leading zeros.
//
// This package follows Semantic Versioning 2.0.0 (see semver.org)
// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
package semver
// parsed returns the parsed form of a semantic version string.
type parsed struct {
major string
minor string
patch string
short string
prerelease string
build string
err string
}
// IsValid reports whether v is a valid semantic version string.
func IsValid(v string) bool {
_, ok := parse(v)
return ok
}
// Canonical returns the canonical formatting of the semantic version v.
// It fills in any missing .MINOR or .PATCH and discards build metadata.
// Two semantic versions compare equal only if their canonical formattings
// are identical strings.
// The canonical invalid semantic version is the empty string.
func Canonical(v string) string {
p, ok := parse(v)
if !ok {
return ""
}
if p.build != "" {
return v[:len(v)-len(p.build)]
}
if p.short != "" {
return v + p.short
}
return v
}
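A short usage sketch of IsValid and Canonical as documented above, assuming the conventional golang.org/x/mod/semver import path for this vendored copy; the version strings are illustrative only.

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.IsValid("v1.2"))          // true: vMAJOR.MINOR shorthand is accepted
	fmt.Println(semver.IsValid("1.2.3"))         // false: the leading "v" is required
	fmt.Println(semver.Canonical("v1.2"))        // "v1.2.0": missing .PATCH is filled in
	fmt.Println(semver.Canonical("v1.2.3+meta")) // "v1.2.3": build metadata is discarded
}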
// Major returns the major version prefix of the semantic version v.
// For example, Major("v2.1.0") == "v2".
// If v is an invalid semantic version string, Major returns the empty string.
func Major(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return v[:1+len(pv.major)]
}
// MajorMinor returns the major.minor version prefix of the semantic version v.
// For example, MajorMinor("v2.1.0") == "v2.1".
// If v is an invalid semantic version string, MajorMinor returns the empty string.
func MajorMinor(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
i := 1 + len(pv.major)
if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
return v[:j]
}
return v[:i] + "." + pv.minor
}
// Prerelease returns the prerelease suffix of the semantic version v.
// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
// If v is an invalid semantic version string, Prerelease returns the empty string.
func Prerelease(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.prerelease
}
// Build returns the build suffix of the semantic version v.
// For example, Build("v2.1.0+meta") == "+meta".
// If v is an invalid semantic version string, Build returns the empty string.
func Build(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.build
}
// Compare returns an integer comparing two versions according to
// semantic version precedence.
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
//
// An invalid semantic version string is considered less than a valid one.
// All invalid semantic version strings compare equal to each other.
func Compare(v, w string) int {
pv, ok1 := parse(v)
pw, ok2 := parse(w)
if !ok1 && !ok2 {
return 0
}
if !ok1 {
return -1
}
if !ok2 {
return +1
}
if c := compareInt(pv.major, pw.major); c != 0 {
return c
}
if c := compareInt(pv.minor, pw.minor); c != 0 {
return c
}
if c := compareInt(pv.patch, pw.patch); c != 0 {
return c
}
return comparePrerelease(pv.prerelease, pw.prerelease)
}
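A companion sketch for Compare, showing the precedence rules quoted further down in comparePrerelease (same assumed import path; values are illustrative).

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.Compare("v1.0.0-alpha", "v1.0.0"))          // -1: a prerelease sorts before the release
	fmt.Println(semver.Compare("v1.0.0-beta.2", "v1.0.0-beta.11")) // -1: numeric identifiers compare numerically
	fmt.Println(semver.Compare("v2", "v2.0.0"))                    //  0: v2 is shorthand for v2.0.0
	fmt.Println(semver.Compare("not-semver", "v0.0.1"))            // -1: an invalid version is less than any valid one
}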
// Max canonicalizes its arguments and then returns the version string
// that compares greater.
//
// Deprecated: use Compare instead. In most cases, returning a canonicalized
// version is not expected or desired.
func Max(v, w string) string {
v = Canonical(v)
w = Canonical(w)
if Compare(v, w) > 0 {
return v
}
return w
}
func parse(v string) (p parsed, ok bool) {
if v == "" || v[0] != 'v' {
p.err = "missing v prefix"
return
}
p.major, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad major version"
return
}
if v == "" {
p.minor = "0"
p.patch = "0"
p.short = ".0.0"
return
}
if v[0] != '.' {
p.err = "bad minor prefix"
ok = false
return
}
p.minor, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad minor version"
return
}
if v == "" {
p.patch = "0"
p.short = ".0"
return
}
if v[0] != '.' {
p.err = "bad patch prefix"
ok = false
return
}
p.patch, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad patch version"
return
}
if len(v) > 0 && v[0] == '-' {
p.prerelease, v, ok = parsePrerelease(v)
if !ok {
p.err = "bad prerelease"
return
}
}
if len(v) > 0 && v[0] == '+' {
p.build, v, ok = parseBuild(v)
if !ok {
p.err = "bad build"
return
}
}
if v != "" {
p.err = "junk on end"
ok = false
return
}
ok = true
return
}
func parseInt(v string) (t, rest string, ok bool) {
if v == "" {
return
}
if v[0] < '0' || '9' < v[0] {
return
}
i := 1
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
if v[0] == '0' && i != 1 {
return
}
return v[:i], v[i:], true
}
func parsePrerelease(v string) (t, rest string, ok bool) {
// "A pre-release version MAY be denoted by appending a hyphen and
// a series of dot separated identifiers immediately following the patch version.
// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
if v == "" || v[0] != '-' {
return
}
i := 1
start := 1
for i < len(v) && v[i] != '+' {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i || isBadNum(v[start:i]) {
return
}
start = i + 1
}
i++
}
if start == i || isBadNum(v[start:i]) {
return
}
return v[:i], v[i:], true
}
func parseBuild(v string) (t, rest string, ok bool) {
if v == "" || v[0] != '+' {
return
}
i := 1
start := 1
for i < len(v) {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i {
return
}
start = i + 1
}
i++
}
if start == i {
return
}
return v[:i], v[i:], true
}
func isIdentChar(c byte) bool {
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
}
func isBadNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v) && i > 1 && v[0] == '0'
}
func isNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v)
}
func compareInt(x, y string) int {
if x == y {
return 0
}
if len(x) < len(y) {
return -1
}
if len(x) > len(y) {
return +1
}
if x < y {
return -1
} else {
return +1
}
}
func comparePrerelease(x, y string) int {
// "When major, minor, and patch are equal, a pre-release version has
// lower precedence than a normal version.
// Example: 1.0.0-alpha < 1.0.0.
// Precedence for two pre-release versions with the same major, minor,
// and patch version MUST be determined by comparing each dot separated
// identifier from left to right until a difference is found as follows:
// identifiers consisting of only digits are compared numerically and
// identifiers with letters or hyphens are compared lexically in ASCII
// sort order. Numeric identifiers always have lower precedence than
// non-numeric identifiers. A larger set of pre-release fields has a
// higher precedence than a smaller set, if all of the preceding
// identifiers are equal.
// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
if x == y {
return 0
}
if x == "" {
return +1
}
if y == "" {
return -1
}
for x != "" && y != "" {
x = x[1:] // skip - or .
y = y[1:] // skip - or .
var dx, dy string
dx, x = nextIdent(x)
dy, y = nextIdent(y)
if dx != dy {
ix := isNum(dx)
iy := isNum(dy)
if ix != iy {
if ix {
return -1
} else {
return +1
}
}
if ix {
if len(dx) < len(dy) {
return -1
}
if len(dx) > len(dy) {
return +1
}
}
if dx < dy {
return -1
} else {
return +1
}
}
}
if x == "" {
return -1
} else {
return +1
}
}
func nextIdent(x string) (dx, rest string) {
i := 0
for i < len(x) && x[i] != '.' {
i++
}
return x[:i], x[i:]
}
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package execabs is a drop-in replacement for os/exec
// that requires PATH lookups to find absolute paths.
// That is, execabs.Command("cmd") runs the same PATH lookup
// as exec.Command("cmd"), but if the result is a path
// which is relative, the Run and Start methods will report
// an error instead of running the executable.
//
// See https://blog.golang.org/path-security for more information
// about when it may be necessary or appropriate to use this package.
package execabs
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"reflect"
"unsafe"
)
// ErrNotFound is the error resulting if a path search failed to find an executable file.
// It is an alias for exec.ErrNotFound.
var ErrNotFound = exec.ErrNotFound
// Cmd represents an external command being prepared or run.
// It is an alias for exec.Cmd.
type Cmd = exec.Cmd
// Error is returned by LookPath when it fails to classify a file as an executable.
// It is an alias for exec.Error.
type Error = exec.Error
// An ExitError reports an unsuccessful exit by a command.
// It is an alias for exec.ExitError.
type ExitError = exec.ExitError
func relError(file, path string) error {
return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path)
}
// LookPath searches for an executable named file in the directories
// named by the PATH environment variable. If file contains a slash,
// it is tried directly and the PATH is not consulted. The result will be
// an absolute path.
//
// LookPath differs from exec.LookPath in its handling of PATH lookups,
// which are used for file names without slashes. If exec.LookPath's
// PATH lookup would have returned an executable from the current directory,
// LookPath instead returns an error.
func LookPath(file string) (string, error) {
path, err := exec.LookPath(file)
if err != nil {
return "", err
}
if filepath.Base(file) == file && !filepath.IsAbs(path) {
return "", relError(file, path)
}
return path, nil
}
func fixCmd(name string, cmd *exec.Cmd) {
if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) {
// exec.Command was called with a bare binary name and
// exec.LookPath returned a path which is not absolute.
// Set cmd.lookPathErr and clear cmd.Path so that it
// cannot be run.
lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer()))
if *lookPathErr == nil {
*lookPathErr = relError(name, cmd.Path)
}
cmd.Path = ""
}
}
// CommandContext is like Command but includes a context.
//
// The provided context is used to kill the process (by calling os.Process.Kill)
// if the context becomes done before the command completes on its own.
func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, name, arg...)
fixCmd(name, cmd)
return cmd
}
// Command returns the Cmd struct to execute the named program with the given arguments.
// See exec.Command for most details.
//
// Command differs from exec.Command in its handling of PATH lookups,
// which are used when the program name contains no slashes.
// If exec.Command would have returned an exec.Cmd configured to run an
// executable from the current directory, Command instead
// returns an exec.Cmd that will return an error from Start or Run.
func Command(name string, arg ...string) *exec.Cmd {
cmd := exec.Command(name, arg...)
fixCmd(name, cmd)
return cmd
}
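A minimal sketch of the behavioural difference this package provides, assuming the canonical golang.org/x/sys/execabs import path for this vendored copy; "git" is just an example binary.

package main

import (
	"fmt"

	"golang.org/x/sys/execabs"
)

func main() {
	// LookPath behaves like exec.LookPath, except that a result which is not
	// an absolute path (i.e. an executable in the current directory) is
	// rejected with an error instead of being returned.
	path, err := execabs.LookPath("git")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("resolved to:", path)

	// Command wraps exec.Command; if the PATH lookup resolved to a relative
	// path, Run/Start report the error installed by fixCmd above.
	if err := execabs.Command("git", "--version").Run(); err != nil {
		fmt.Println("git did not run:", err)
	}
}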
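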
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gcexportdata provides functions for locating, reading, and
// writing export data files containing type information produced by the
// gc compiler. This package supports go1.7 export data format and all
// later versions.
//
// Although it might seem convenient for this package to live alongside
// go/types in the standard library, this would cause version skew
// problems for developer tools that use it, since they must be able to
// consume the outputs of the gc compiler both before and after a Go
// update such as from Go 1.7 to Go 1.8. Because this package lives in
// golang.org/x/tools, sites can update their version of this repo some
// time before the Go 1.8 release and rebuild and redeploy their
// developer tools, which will then be able to consume both Go 1.7 and
// Go 1.8 export data files, so they will work before and after the
// Go update. (See discussion at https://golang.org/issue/15651.)
//
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
import (
"bufio"
"bytes"
"fmt"
"go/token"
"go/types"
"io"
"io/ioutil"
"golang.org/x/tools/go/internal/gcimporter"
)
// Find returns the name of an object (.o) or archive (.a) file
// containing type information for the specified import path,
// using the workspace layout conventions of go/build.
// If no file was found, an empty filename is returned.
//
// A relative srcDir is interpreted relative to the current working directory.
//
// Find also returns the package's resolved (canonical) import path,
// reflecting the effects of srcDir and vendoring on importPath.
func Find(importPath, srcDir string) (filename, path string) {
return gcimporter.FindPkg(importPath, srcDir)
}
// NewReader returns a reader for the export data section of an object
// (.o) or archive (.a) file read from r. The new reader may provide
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
buf := bufio.NewReader(r)
_, err := gcimporter.FindExportData(buf)
// If we ever switch to a zip-like archive format with the ToC
// at the end, we can return the correct portion of export data,
// but for now we must return the entire rest of the file.
return buf, err
}
// Read reads export data from in, decodes it, and returns type
// information for the package.
// The package name is specified by path.
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references
// within the export data to other packages are consistent. The caller
// must ensure that imports[path] does not exist, or exists but is
// incomplete (see types.Package.Complete), and Read inserts the
// resulting package into this map entry.
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
data, err := ioutil.ReadAll(in)
if err != nil {
return nil, fmt.Errorf("reading export data for %q: %v", path, err)
}
if bytes.HasPrefix(data, []byte("!<arch>")) {
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
}
// The App Engine Go runtime v1.6 uses the old export data format.
// TODO(adonovan): delete once v1.7 has been around for a while.
if bytes.HasPrefix(data, []byte("package ")) {
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
}
// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
}
_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
return pkg, err
}
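A small end-to-end sketch of the Find / NewReader / Read pipeline described above. It assumes export data for the package is present on disk (under module builds Find may return an empty filename, in which case nothing is loaded); the chosen package "fmt" is only an example.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate the compiled export data using go/build workspace conventions.
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Skip the archive/object header, then decode the type information.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "exports", len(pkg.Scope().Names()), "names")
}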
// Write writes encoded type information for the specified package to out.
// The FileSet provides file position information for named objects.
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
if _, err := io.WriteString(out, "i"); err != nil {
return err
}
return gcimporter.IExportData(out, fset, pkg)
}
// ReadBundle reads an export bundle from in, decodes it, and returns type
// information for the packages.
// File position information is added to fset.
//
// ReadBundle may inspect and add to the imports map to ensure that references
// within the export bundle to other packages are consistent.
//
// On return, the state of the reader is undefined.
//
// Experimental: This API is experimental and may change in the future.
func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
data, err := ioutil.ReadAll(in)
if err != nil {
return nil, fmt.Errorf("reading export bundle: %v", err)
}
return gcimporter.IImportBundle(fset, imports, data)
}
// WriteBundle writes encoded type information for the specified packages to out.
// The FileSet provides file position information for named objects.
//
// Experimental: This API is experimental and may change in the future.
func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
return gcimporter.IExportBundle(out, fset, pkgs)
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcexportdata
import (
"fmt"
"go/token"
"go/types"
"os"
)
// NewImporter returns a new instance of the types.Importer interface
// that reads type information from export data files written by gc.
// The Importer also satisfies types.ImporterFrom.
//
// Export data files are located using "go build" workspace conventions
// and the build.Default context.
//
// Use this importer instead of go/importer.For("gc", ...) to avoid the
// version-skew problems described in the documentation of this package,
// or to control the FileSet or access the imports map populated during
// package loading.
//
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
return importer{fset, imports}
}
type importer struct {
fset *token.FileSet
imports map[string]*types.Package
}
func (imp importer) Import(importPath string) (*types.Package, error) {
return imp.ImportFrom(importPath, "", 0)
}
func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
filename, path := Find(importPath, srcDir)
if filename == "" {
if importPath == "unsafe" {
// Even for unsafe, call Find first in case
// the package was vendored.
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %s", importPath)
}
if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
return pkg, nil // cache hit
}
// open file
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
f.Close()
if err != nil {
// add file name to error
err = fmt.Errorf("reading export data: %s: %v", filename, err)
}
}()
r, err := NewReader(f)
if err != nil {
return nil, err
}
return Read(r, imp.fset, imp.imports, path)
}
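For completeness, a hedged sketch of wiring NewImporter into go/types; the source snippet and package path are made up, and, as with Find above, loading only succeeds where compiled export data is available.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/gcexportdata"
)

const src = `package demo
import "fmt"
func Hello() { fmt.Println("hi") }`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}

	// Resolve the "fmt" import through gc export data rather than go/importer,
	// avoiding the version-skew issue described in the package comment.
	imports := make(map[string]*types.Package)
	conf := types.Config{Importer: gcexportdata.NewImporter(fset, imports)}
	pkg, err := conf.Check("demo", fset, []*ast.File{file}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("type-checked", pkg.Path())
}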
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Binary package export.
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
// see that file for specification of the format.
package gcimporter
import (
"bytes"
"encoding/binary"
"fmt"
"go/ast"
"go/constant"
"go/token"
"go/types"
"math"
"math/big"
"sort"
"strings"
)
// If debugFormat is set, each integer and string value is preceded by a marker
// and position information in the encoding. This mechanism permits an importer
// to recognize immediately when it is out of sync. The importer recognizes this
// mode automatically (i.e., it can import export data produced with debugging
// support even if debugFormat is not set at the time of import). This mode will
// lead to massively larger export data (by a factor of 2 to 3) and should only
// be enabled during development and debugging.
//
// NOTE: This flag is the first flag to enable if importing dies because of
// (suspected) format errors, and whenever a change is made to the format.
const debugFormat = false // default: false
// If trace is set, debugging output is printed to std out.
const trace = false // default: false
// Current export format version. Increase with each format change.
// Note: The latest binary (non-indexed) export format is at version 6.
// This exporter is still at level 4, but it doesn't matter since
// the binary importer can handle older versions just fine.
// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
// 4: type name objects support type aliases, uses aliasTag
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
// 2: removed unused bool in ODCL export (compiler only)
// 1: header format change (more regular), export package for _ struct fields
// 0: Go1.7 encoding
const exportVersion = 4
// trackAllTypes enables cycle tracking for all types, not just named
// types. The existing compiler invariants assume that unnamed types
// that are not completely set up are not used, or else there are spurious
// errors.
// If disabled, only named types are tracked, possibly leading to slightly
// less efficient encoding in rare cases. It also prevents the export of
// some corner-case type declarations (but those are not handled correctly
// with the textual export format either).
// TODO(gri) enable and remove once issues caused by it are fixed
const trackAllTypes = false
type exporter struct {
fset *token.FileSet
out bytes.Buffer
// object -> index maps, indexed in order of serialization
strIndex map[string]int
pkgIndex map[*types.Package]int
typIndex map[types.Type]int
// position encoding
posInfoFormat bool
prevFile string
prevLine int
// debugging support
written int // bytes written
indent int // for trace
}
// internalError represents an error generated inside this package.
type internalError string
func (e internalError) Error() string { return "gcimporter: " + string(e) }
func internalErrorf(format string, args ...interface{}) error {
return internalError(fmt.Sprintf(format, args...))
}
// BExportData returns binary export data for pkg.
// If no file set is provided, position info will be missing.
func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
defer func() {
if e := recover(); e != nil {
if ierr, ok := e.(internalError); ok {
err = ierr
return
}
// Not an internal error; panic again.
panic(e)
}
}()
p := exporter{
fset: fset,
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
pkgIndex: make(map[*types.Package]int),
typIndex: make(map[types.Type]int),
posInfoFormat: true, // TODO(gri) might become a flag, eventually
}
// write version info
// The version string must start with "version %d" where %d is the version
// number. Additional debugging information may follow after a blank; that
// text is ignored by the importer.
p.rawStringln(fmt.Sprintf("version %d", exportVersion))
var debug string
if debugFormat {
debug = "debug"
}
p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
p.bool(trackAllTypes)
p.bool(p.posInfoFormat)
// --- generic export data ---
// populate type map with predeclared "known" types
for index, typ := range predeclared() {
p.typIndex[typ] = index
}
if len(p.typIndex) != len(predeclared()) {
return nil, internalError("duplicate entries in type map?")
}
// write package data
p.pkg(pkg, true)
if trace {
p.tracef("\n")
}
// write objects
objcount := 0
scope := pkg.Scope()
for _, name := range scope.Names() {
if !ast.IsExported(name) {
continue
}
if trace {
p.tracef("\n")
}
p.obj(scope.Lookup(name))
objcount++
}
// indicate end of list
if trace {
p.tracef("\n")
}
p.tag(endTag)
// for self-verification only (redundant)
p.int(objcount)
if trace {
p.tracef("\n")
}
// --- end of export data ---
return p.out.Bytes(), nil
}
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
if pkg == nil {
panic(internalError("unexpected nil pkg"))
}
// if we saw the package before, write its index (>= 0)
if i, ok := p.pkgIndex[pkg]; ok {
p.index('P', i)
return
}
// otherwise, remember the package, write the package tag (< 0) and package data
if trace {
p.tracef("P%d = { ", len(p.pkgIndex))
defer p.tracef("} ")
}
p.pkgIndex[pkg] = len(p.pkgIndex)
p.tag(packageTag)
p.string(pkg.Name())
if emptypath {
p.string("")
} else {
p.string(pkg.Path())
}
}
func (p *exporter) obj(obj types.Object) {
switch obj := obj.(type) {
case *types.Const:
p.tag(constTag)
p.pos(obj)
p.qualifiedName(obj)
p.typ(obj.Type())
p.value(obj.Val())
case *types.TypeName:
if obj.IsAlias() {
p.tag(aliasTag)
p.pos(obj)
p.qualifiedName(obj)
} else {
p.tag(typeTag)
}
p.typ(obj.Type())
case *types.Var:
p.tag(varTag)
p.pos(obj)
p.qualifiedName(obj)
p.typ(obj.Type())
case *types.Func:
p.tag(funcTag)
p.pos(obj)
p.qualifiedName(obj)
sig := obj.Type().(*types.Signature)
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
default:
panic(internalErrorf("unexpected object %v (%T)", obj, obj))
}
}
func (p *exporter) pos(obj types.Object) {
if !p.posInfoFormat {
return
}
file, line := p.fileLine(obj)
if file == p.prevFile {
// common case: write line delta
// delta == 0 means different file or no line change
delta := line - p.prevLine
p.int(delta)
if delta == 0 {
p.int(-1) // -1 means no file change
}
} else {
// different file
p.int(0)
// Encode filename as length of common prefix with previous
// filename, followed by (possibly empty) suffix. Filenames
// frequently share path prefixes, so this can save a lot
// of space and make export data size less dependent on file
// path length. The suffix is unlikely to be empty because
// file names tend to end in ".go".
n := commonPrefixLen(p.prevFile, file)
p.int(n) // n >= 0
p.string(file[n:]) // write suffix only
p.prevFile = file
p.int(line)
}
p.prevLine = line
}
func (p *exporter) fileLine(obj types.Object) (file string, line int) {
if p.fset != nil {
pos := p.fset.Position(obj.Pos())
file = pos.Filename
line = pos.Line
}
return
}
func commonPrefixLen(a, b string) int {
if len(a) > len(b) {
a, b = b, a
}
// len(a) <= len(b)
i := 0
for i < len(a) && a[i] == b[i] {
i++
}
return i
}
func (p *exporter) qualifiedName(obj types.Object) {
p.string(obj.Name())
p.pkg(obj.Pkg(), false)
}
func (p *exporter) typ(t types.Type) {
if t == nil {
panic(internalError("nil type"))
}
// Possible optimization: Anonymous pointer types *T where
// T is a named type are common. We could canonicalize all
// such types *T to a single type PT = *T. This would lead
// to at most one *T entry in typIndex, and all future *T's
// would be encoded as the respective index directly. Would
// save 1 byte (pointerTag) per *T and reduce the typIndex
// size (at the cost of a canonicalization map). We can do
// this later, without encoding format change.
// if we saw the type before, write its index (>= 0)
if i, ok := p.typIndex[t]; ok {
p.index('T', i)
return
}
// otherwise, remember the type, write the type tag (< 0) and type data
if trackAllTypes {
if trace {
p.tracef("T%d = {>\n", len(p.typIndex))
defer p.tracef("<\n} ")
}
p.typIndex[t] = len(p.typIndex)
}
switch t := t.(type) {
case *types.Named:
if !trackAllTypes {
// if we don't track all types, track named types now
p.typIndex[t] = len(p.typIndex)
}
p.tag(namedTag)
p.pos(t.Obj())
p.qualifiedName(t.Obj())
p.typ(t.Underlying())
if !types.IsInterface(t) {
p.assocMethods(t)
}
case *types.Array:
p.tag(arrayTag)
p.int64(t.Len())
p.typ(t.Elem())
case *types.Slice:
p.tag(sliceTag)
p.typ(t.Elem())
case *dddSlice:
p.tag(dddTag)
p.typ(t.elem)
case *types.Struct:
p.tag(structTag)
p.fieldList(t)
case *types.Pointer:
p.tag(pointerTag)
p.typ(t.Elem())
case *types.Signature:
p.tag(signatureTag)
p.paramList(t.Params(), t.Variadic())
p.paramList(t.Results(), false)
case *types.Interface:
p.tag(interfaceTag)
p.iface(t)
case *types.Map:
p.tag(mapTag)
p.typ(t.Key())
p.typ(t.Elem())
case *types.Chan:
p.tag(chanTag)
p.int(int(3 - t.Dir())) // hack
p.typ(t.Elem())
default:
panic(internalErrorf("unexpected type %T: %s", t, t))
}
}
func (p *exporter) assocMethods(named *types.Named) {
// Sort methods (for determinism).
var methods []*types.Func
for i := 0; i < named.NumMethods(); i++ {
methods = append(methods, named.Method(i))
}
sort.Sort(methodsByName(methods))
p.int(len(methods))
if trace && methods != nil {
p.tracef("associated methods {>\n")
}
for i, m := range methods {
if trace && i > 0 {
p.tracef("\n")
}
p.pos(m)
name := m.Name()
p.string(name)
if !exported(name) {
p.pkg(m.Pkg(), false)
}
sig := m.Type().(*types.Signature)
p.paramList(types.NewTuple(sig.Recv()), false)
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
p.int(0) // dummy value for go:nointerface pragma - ignored by importer
}
if trace && methods != nil {
p.tracef("<\n} ")
}
}
type methodsByName []*types.Func
func (x methodsByName) Len() int { return len(x) }
func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
func (p *exporter) fieldList(t *types.Struct) {
if trace && t.NumFields() > 0 {
p.tracef("fields {>\n")
defer p.tracef("<\n} ")
}
p.int(t.NumFields())
for i := 0; i < t.NumFields(); i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.field(t.Field(i))
p.string(t.Tag(i))
}
}
func (p *exporter) field(f *types.Var) {
if !f.IsField() {
panic(internalError("field expected"))
}
p.pos(f)
p.fieldName(f)
p.typ(f.Type())
}
func (p *exporter) iface(t *types.Interface) {
// TODO(gri): enable importer to load embedded interfaces,
// then emit Embeddeds and ExplicitMethods separately here.
p.int(0)
n := t.NumMethods()
if trace && n > 0 {
p.tracef("methods {>\n")
defer p.tracef("<\n} ")
}
p.int(n)
for i := 0; i < n; i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.method(t.Method(i))
}
}
func (p *exporter) method(m *types.Func) {
sig := m.Type().(*types.Signature)
if sig.Recv() == nil {
panic(internalError("method expected"))
}
p.pos(m)
p.string(m.Name())
if m.Name() != "_" && !ast.IsExported(m.Name()) {
p.pkg(m.Pkg(), false)
}
// interface method; no need to encode receiver.
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
}
func (p *exporter) fieldName(f *types.Var) {
name := f.Name()
if f.Anonymous() {
// anonymous field - we distinguish between 3 cases:
// 1) field name matches base type name and is exported
// 2) field name matches base type name and is not exported
// 3) field name doesn't match base type name (alias name)
bname := basetypeName(f.Type())
if name == bname {
if ast.IsExported(name) {
name = "" // 1) we don't need to know the field name or package
} else {
name = "?" // 2) use unexported name "?" to force package export
}
} else {
// 3) indicate alias and export name as is
// (this requires an extra "@" but this is a rare case)
p.string("@")
}
}
p.string(name)
if name != "" && !ast.IsExported(name) {
p.pkg(f.Pkg(), false)
}
}
func basetypeName(typ types.Type) string {
switch typ := deref(typ).(type) {
case *types.Basic:
return typ.Name()
case *types.Named:
return typ.Obj().Name()
default:
return "" // unnamed type
}
}
func (p *exporter) paramList(params *types.Tuple, variadic bool) {
// use negative length to indicate unnamed parameters
// (look at the first parameter only since either all
// names are present or all are absent)
n := params.Len()
if n > 0 && params.At(0).Name() == "" {
n = -n
}
p.int(n)
for i := 0; i < params.Len(); i++ {
q := params.At(i)
t := q.Type()
if variadic && i == params.Len()-1 {
t = &dddSlice{t.(*types.Slice).Elem()}
}
p.typ(t)
if n > 0 {
name := q.Name()
p.string(name)
if name != "_" {
p.pkg(q.Pkg(), false)
}
}
p.string("") // no compiler-specific info
}
}
func (p *exporter) value(x constant.Value) {
if trace {
p.tracef("= ")
}
switch x.Kind() {
case constant.Bool:
tag := falseTag
if constant.BoolVal(x) {
tag = trueTag
}
p.tag(tag)
case constant.Int:
if v, exact := constant.Int64Val(x); exact {
// common case: x fits into an int64 - use compact encoding
p.tag(int64Tag)
p.int64(v)
return
}
// uncommon case: large x - use float encoding
// (powers of 2 will be encoded efficiently with exponent)
p.tag(floatTag)
p.float(constant.ToFloat(x))
case constant.Float:
p.tag(floatTag)
p.float(x)
case constant.Complex:
p.tag(complexTag)
p.float(constant.Real(x))
p.float(constant.Imag(x))
case constant.String:
p.tag(stringTag)
p.string(constant.StringVal(x))
case constant.Unknown:
// package contains type errors
p.tag(unknownTag)
default:
panic(internalErrorf("unexpected value %v (%T)", x, x))
}
}
func (p *exporter) float(x constant.Value) {
if x.Kind() != constant.Float {
panic(internalErrorf("unexpected constant %v, want float", x))
}
// extract sign (there is no -0)
sign := constant.Sign(x)
if sign == 0 {
// x == 0
p.int(0)
return
}
// x != 0
var f big.Float
if v, exact := constant.Float64Val(x); exact {
// float64
f.SetFloat64(v)
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
// TODO(gri): add big.Rat accessor to constant.Value.
r := valueToRat(num)
f.SetRat(r.Quo(r, valueToRat(denom)))
} else {
// Value too large to represent as a fraction => inaccessible.
// TODO(gri): add big.Float accessor to constant.Value.
f.SetFloat64(math.MaxFloat64) // FIXME
}
// extract exponent such that 0.5 <= m < 1.0
var m big.Float
exp := f.MantExp(&m)
// extract mantissa as *big.Int
// - set exponent large enough so mant satisfies mant.IsInt()
// - get *big.Int from mant
m.SetMantExp(&m, int(m.MinPrec()))
mant, acc := m.Int(nil)
if acc != big.Exact {
panic(internalError("internal error"))
}
p.int(sign)
p.int(exp)
p.string(string(mant.Bytes()))
}
func valueToRat(x constant.Value) *big.Rat {
// Convert little-endian to big-endian.
// I can't believe this is necessary.
bytes := constant.Bytes(x)
for i := 0; i < len(bytes)/2; i++ {
bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
}
return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
}
func (p *exporter) bool(b bool) bool {
if trace {
p.tracef("[")
defer p.tracef("= %v] ", b)
}
x := 0
if b {
x = 1
}
p.int(x)
return b
}
// ----------------------------------------------------------------------------
// Low-level encoders
func (p *exporter) index(marker byte, index int) {
if index < 0 {
panic(internalError("invalid index < 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%c%d ", marker, index)
}
p.rawInt64(int64(index))
}
func (p *exporter) tag(tag int) {
if tag >= 0 {
panic(internalError("invalid tag >= 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%s ", tagString[-tag])
}
p.rawInt64(int64(tag))
}
func (p *exporter) int(x int) {
p.int64(int64(x))
}
func (p *exporter) int64(x int64) {
if debugFormat {
p.marker('i')
}
if trace {
p.tracef("%d ", x)
}
p.rawInt64(x)
}
func (p *exporter) string(s string) {
if debugFormat {
p.marker('s')
}
if trace {
p.tracef("%q ", s)
}
// if we saw the string before, write its index (>= 0)
// (the empty string is mapped to 0)
if i, ok := p.strIndex[s]; ok {
p.rawInt64(int64(i))
return
}
// otherwise, remember string and write its negative length and bytes
p.strIndex[s] = len(p.strIndex)
p.rawInt64(-int64(len(s)))
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
}
// marker emits a marker byte and position information which makes
// it easy for a reader to detect if it is "out of sync". Used for
// debugFormat format only.
func (p *exporter) marker(m byte) {
p.rawByte(m)
// Enable this for help tracking down the location
// of an incorrect marker when running in debugFormat.
if false && trace {
p.tracef("#%d ", p.written)
}
p.rawInt64(int64(p.written))
}
// rawInt64 should only be used by low-level encoders.
func (p *exporter) rawInt64(x int64) {
var tmp [binary.MaxVarintLen64]byte
n := binary.PutVarint(tmp[:], x)
for i := 0; i < n; i++ {
p.rawByte(tmp[i])
}
}
// rawStringln should only be used to emit the initial version string.
func (p *exporter) rawStringln(s string) {
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
p.rawByte('\n')
}
// rawByte is the bottleneck interface to write to p.out.
// rawByte escapes b as follows (any encoding that hides '$' would do):
//
// '$' => '|' 'S'
// '|' => '|' '|'
//
// Necessary so other tools can find the end of the
// export data by searching for "$$".
// rawByte should only be used by low-level encoders.
func (p *exporter) rawByte(b byte) {
switch b {
case '$':
// write '$' as '|' 'S'
b = 'S'
fallthrough
case '|':
// write '|' as '|' '|'
p.out.WriteByte('|')
p.written++
}
p.out.WriteByte(b)
p.written++
}
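The escaping scheme documented above is easy to state on its own: '$' becomes "|S" and '|' becomes "||", so the encoded stream never contains a bare '$' and "$$" can safely terminate the export data. A standalone round-trip sketch follows; the helper names are illustrative, not part of this package.

package main

import (
	"bytes"
	"fmt"
)

// escape mirrors rawByte's scheme: '$' -> "|S", '|' -> "||".
func escape(data []byte) []byte {
	var out bytes.Buffer
	for _, b := range data {
		switch b {
		case '$':
			out.WriteString("|S")
		case '|':
			out.WriteString("||")
		default:
			out.WriteByte(b)
		}
	}
	return out.Bytes()
}

// unescape reverses the mapping.
func unescape(data []byte) []byte {
	var out bytes.Buffer
	for i := 0; i < len(data); i++ {
		if data[i] == '|' && i+1 < len(data) {
			i++
			if data[i] == 'S' {
				out.WriteByte('$')
			} else {
				out.WriteByte('|')
			}
			continue
		}
		out.WriteByte(data[i])
	}
	return out.Bytes()
}

func main() {
	in := []byte("price: $5 | done")
	enc := escape(in)
	fmt.Printf("%q -> %q -> %q\n", in, enc, unescape(enc))
	fmt.Println("round trip ok:", bytes.Equal(in, unescape(enc)))
}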
// tracef is like fmt.Printf but it rewrites the format string
// to take care of indentation.
func (p *exporter) tracef(format string, args ...interface{}) {
if strings.ContainsAny(format, "<>\n") {
var buf bytes.Buffer
for i := 0; i < len(format); i++ {
// no need to deal with runes
ch := format[i]
switch ch {
case '>':
p.indent++
continue
case '<':
p.indent--
continue
}
buf.WriteByte(ch)
if ch == '\n' {
for j := p.indent; j > 0; j-- {
buf.WriteString(". ")
}
}
}
format = buf.String()
}
fmt.Printf(format, args...)
}
// Debugging support.
// (tagString is only used when tracing is enabled)
var tagString = [...]string{
// Packages
-packageTag: "package",
// Types
-namedTag: "named type",
-arrayTag: "array",
-sliceTag: "slice",
-dddTag: "ddd",
-structTag: "struct",
-pointerTag: "pointer",
-signatureTag: "signature",
-interfaceTag: "interface",
-mapTag: "map",
-chanTag: "chan",
// Values
-falseTag: "false",
-trueTag: "true",
-int64Tag: "int64",
-floatTag: "float",
-fractionTag: "fraction",
-complexTag: "complex",
-stringTag: "string",
-unknownTag: "unknown",
// Type aliases
-aliasTag: "alias",
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go.
package gcimporter
import (
"encoding/binary"
"fmt"
"go/constant"
"go/token"
"go/types"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
type importer struct {
imports map[string]*types.Package
data []byte
importpath string
buf []byte // for reading strings
version int // export format version
// object lists
strList []string // in order of appearance
pathList []string // in order of appearance
pkgList []*types.Package // in order of appearance
typList []types.Type // in order of appearance
interfaceList []*types.Interface // for delayed completion only
trackAllTypes bool
// position encoding
posInfoFormat bool
prevFile string
prevLine int
fake fakeFileSet
// debugging support
debugFormat bool
read int // bytes read
}
// BImportData imports a package from the serialized package data
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
// catch panics and return them as errors
const currentVersion = 6
version := -1 // unknown version
defer func() {
if e := recover(); e != nil {
// Return a (possibly nil or incomplete) package unchanged (see #16088).
if version > currentVersion {
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
} else {
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
}
}
}()
p := importer{
imports: imports,
data: data,
importpath: path,
version: version,
strList: []string{""}, // empty string is mapped to 0
pathList: []string{""}, // empty string is mapped to 0
fake: fakeFileSet{
fset: fset,
files: make(map[string]*token.File),
},
}
// read version info
var versionstr string
if b := p.rawByte(); b == 'c' || b == 'd' {
// Go1.7 encoding; first byte encodes low-level
// encoding format (compact vs debug).
// For backward-compatibility only (avoid problems with
// old installed packages). Newly compiled packages use
// the extensible format string.
// TODO(gri) Remove this support eventually; after Go1.8.
if b == 'd' {
p.debugFormat = true
}
p.trackAllTypes = p.rawByte() == 'a'
p.posInfoFormat = p.int() != 0
versionstr = p.string()
if versionstr == "v1" {
version = 0
}
} else {
// Go1.8 extensible encoding
// read version string and extract version number (ignore anything after the version number)
versionstr = p.rawStringln(b)
if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
version = v
}
}
}
p.version = version
// read version specific flags - extend as necessary
switch p.version {
// case currentVersion:
// ...
// fallthrough
case currentVersion, 5, 4, 3, 2, 1:
p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
p.trackAllTypes = p.int() != 0
p.posInfoFormat = p.int() != 0
case 0:
// Go1.7 encoding format - nothing to do here
default:
errorf("unknown bexport format version %d (%q)", p.version, versionstr)
}
// --- generic export data ---
// populate typList with predeclared "known" types
p.typList = append(p.typList, predeclared()...)
// read package data
pkg = p.pkg()
// read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
objcount := 0
for {
tag := p.tagOrIndex()
if tag == endTag {
break
}
p.obj(tag)
objcount++
}
// self-verification
if count := p.int(); count != objcount {
errorf("got %d objects; want %d", objcount, count)
}
// ignore compiler-specific import data
// complete interfaces
// TODO(gri) re-investigate if we still need to do this in a delayed fashion
for _, typ := range p.interfaceList {
typ.Complete()
}
// record all referenced packages as imports
list := append(([]*types.Package)(nil), p.pkgList[1:]...)
sort.Sort(byPath(list))
pkg.SetImports(list)
// package was imported completely and without errors
pkg.MarkComplete()
return p.read, pkg, nil
}
func errorf(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}
func (p *importer) pkg() *types.Package {
// if the package was seen before, i is its index (>= 0)
i := p.tagOrIndex()
if i >= 0 {
return p.pkgList[i]
}
// otherwise, i is the package tag (< 0)
if i != packageTag {
errorf("unexpected package tag %d version %d", i, p.version)
}
// read package data
name := p.string()
var path string
if p.version >= 5 {
path = p.path()
} else {
path = p.string()
}
if p.version >= 6 {
p.int() // package height; unused by go/types
}
// we should never see an empty package name
if name == "" {
errorf("empty package name in import")
}
// an empty path denotes the package we are currently importing;
// it must be the first package we see
if (path == "") != (len(p.pkgList) == 0) {
errorf("package path %q for pkg index %d", path, len(p.pkgList))
}
// if the package was imported before, use that one; otherwise create a new one
if path == "" {
path = p.importpath
}
pkg := p.imports[path]
if pkg == nil {
pkg = types.NewPackage(path, name)
p.imports[path] = pkg
} else if pkg.Name() != name {
errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
}
p.pkgList = append(p.pkgList, pkg)
return pkg
}
// objTag returns the tag value for each object kind.
func objTag(obj types.Object) int {
switch obj.(type) {
case *types.Const:
return constTag
case *types.TypeName:
return typeTag
case *types.Var:
return varTag
case *types.Func:
return funcTag
default:
errorf("unexpected object: %v (%T)", obj, obj) // panics
panic("unreachable")
}
}
func sameObj(a, b types.Object) bool {
// Because unnamed types are not canonicalized, we cannot simply compare types for
// (pointer) identity.
// Ideally we'd check equality of constant values as well, but this is good enough.
return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
}
func (p *importer) declare(obj types.Object) {
pkg := obj.Pkg()
if alt := pkg.Scope().Insert(obj); alt != nil {
// This can only trigger if we import a (non-type) object a second time.
// Excluding type aliases, this cannot happen because 1) we only import a package
// once; and 2) we ignore compiler-specific export data which may contain
// functions whose inlined function bodies refer to other functions that
// were already imported.
// However, type aliases require reexporting the original type, so we need
// to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
// method importer.obj, switch case importing functions).
// TODO(gri) review/update this comment once the gc compiler handles type aliases.
if !sameObj(obj, alt) {
errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
}
}
}
func (p *importer) obj(tag int) {
switch tag {
case constTag:
pos := p.pos()
pkg, name := p.qualifiedName()
typ := p.typ(nil, nil)
val := p.value()
p.declare(types.NewConst(pos, pkg, name, typ, val))
case aliasTag:
// TODO(gri) verify type alias hookup is correct
pos := p.pos()
pkg, name := p.qualifiedName()
typ := p.typ(nil, nil)
p.declare(types.NewTypeName(pos, pkg, name, typ))
case typeTag:
p.typ(nil, nil)
case varTag:
pos := p.pos()
pkg, name := p.qualifiedName()
typ := p.typ(nil, nil)
p.declare(types.NewVar(pos, pkg, name, typ))
case funcTag:
pos := p.pos()
pkg, name := p.qualifiedName()
params, isddd := p.paramList()
result, _ := p.paramList()
sig := types.NewSignature(nil, params, result, isddd)
p.declare(types.NewFunc(pos, pkg, name, sig))
default:
errorf("unexpected object tag %d", tag)
}
}
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
func (p *importer) pos() token.Pos {
if !p.posInfoFormat {
return token.NoPos
}
file := p.prevFile
line := p.prevLine
delta := p.int()
line += delta
if p.version >= 5 {
if delta == deltaNewFile {
if n := p.int(); n >= 0 {
// file changed
file = p.path()
line = n
}
}
} else {
if delta == 0 {
if n := p.int(); n >= 0 {
// file changed
file = p.prevFile[:n] + p.string()
line = p.int()
}
}
}
p.prevFile = file
p.prevLine = line
return p.fake.pos(file, line, 0)
}
// Synthesize a token.Pos
type fakeFileSet struct {
fset *token.FileSet
files map[string]*token.File
}
func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
// TODO(mdempsky): Make use of column.
// Since we don't know the set of needed file positions, we
// reserve maxlines positions per file.
const maxlines = 64 * 1024
f := s.files[file]
if f == nil {
f = s.fset.AddFile(file, -1, maxlines)
s.files[file] = f
// Allocate the fake linebreak indices on first use.
// TODO(adonovan): opt: save ~512KB using a more complex scheme?
fakeLinesOnce.Do(func() {
fakeLines = make([]int, maxlines)
for i := range fakeLines {
fakeLines[i] = i
}
})
f.SetLines(fakeLines)
}
if line > maxlines {
line = 1
}
// Treat the file as if it contained only newlines
// and column=1: use the line number as the offset.
return f.Pos(line - 1)
}
var (
fakeLines []int
fakeLinesOnce sync.Once
)
func (p *importer) qualifiedName() (pkg *types.Package, name string) {
name = p.string()
pkg = p.pkg()
return
}
func (p *importer) record(t types.Type) {
p.typList = append(p.typList, t)
}
// A dddSlice is a types.Type representing ...T parameters.
// It only appears for parameter types and does not escape
// the importer.
type dddSlice struct {
elem types.Type
}
func (t *dddSlice) Underlying() types.Type { return t }
func (t *dddSlice) String() string { return "..." + t.elem.String() }
// parent is the package which declared the type; parent == nil means
// the package currently imported. The parent package is needed for
// exported struct fields and interface methods which don't contain
// explicit package information in the export data.
//
// A non-nil tname is used as the "owner" of the result type; i.e.,
// the result type is the underlying type of tname. tname is used
// to give interface methods a named receiver type where possible.
func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
// if the type was seen before, i is its index (>= 0)
i := p.tagOrIndex()
if i >= 0 {
return p.typList[i]
}
// otherwise, i is the type tag (< 0)
switch i {
case namedTag:
// read type object
pos := p.pos()
parent, name := p.qualifiedName()
scope := parent.Scope()
obj := scope.Lookup(name)
// if the object doesn't exist yet, create and insert it
if obj == nil {
obj = types.NewTypeName(pos, parent, name, nil)
scope.Insert(obj)
}
if _, ok := obj.(*types.TypeName); !ok {
errorf("pkg = %s, name = %s => %s", parent, name, obj)
}
// associate new named type with obj if it doesn't exist yet
t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
// but record the existing type, if any
tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
p.record(tname)
// read underlying type
t0.SetUnderlying(p.typ(parent, t0))
// interfaces don't have associated methods
if types.IsInterface(t0) {
return tname
}
// read associated methods
for i := p.int(); i > 0; i-- {
// TODO(gri) replace this with something closer to fieldName
pos := p.pos()
name := p.string()
if !exported(name) {
p.pkg()
}
recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver?
params, isddd := p.paramList()
result, _ := p.paramList()
p.int() // go:nointerface pragma - discarded
sig := types.NewSignature(recv.At(0), params, result, isddd)
t0.AddMethod(types.NewFunc(pos, parent, name, sig))
}
return tname
case arrayTag:
t := new(types.Array)
if p.trackAllTypes {
p.record(t)
}
n := p.int64()
*t = *types.NewArray(p.typ(parent, nil), n)
return t
case sliceTag:
t := new(types.Slice)
if p.trackAllTypes {
p.record(t)
}
*t = *types.NewSlice(p.typ(parent, nil))
return t
case dddTag:
t := new(dddSlice)
if p.trackAllTypes {
p.record(t)
}
t.elem = p.typ(parent, nil)
return t
case structTag:
t := new(types.Struct)
if p.trackAllTypes {
p.record(t)
}
*t = *types.NewStruct(p.fieldList(parent))
return t
case pointerTag:
t := new(types.Pointer)
if p.trackAllTypes {
p.record(t)
}
*t = *types.NewPointer(p.typ(parent, nil))
return t
case signatureTag:
t := new(types.Signature)
if p.trackAllTypes {
p.record(t)
}
params, isddd := p.paramList()
result, _ := p.paramList()
*t = *types.NewSignature(nil, params, result, isddd)
return t
case interfaceTag:
// Create a dummy entry in the type list. This is safe because we
// cannot expect the interface type to appear in a cycle, as any
// such cycle must contain a named type which would have been
// first defined earlier.
// TODO(gri) Is this still true now that we have type aliases?
// See issue #23225.
n := len(p.typList)
if p.trackAllTypes {
p.record(nil)
}
var embeddeds []types.Type
for n := p.int(); n > 0; n-- {
p.pos()
embeddeds = append(embeddeds, p.typ(parent, nil))
}
t := newInterface(p.methodList(parent, tname), embeddeds)
p.interfaceList = append(p.interfaceList, t)
if p.trackAllTypes {
p.typList[n] = t
}
return t
case mapTag:
t := new(types.Map)
if p.trackAllTypes {
p.record(t)
}
key := p.typ(parent, nil)
val := p.typ(parent, nil)
*t = *types.NewMap(key, val)
return t
case chanTag:
t := new(types.Chan)
if p.trackAllTypes {
p.record(t)
}
dir := chanDir(p.int())
val := p.typ(parent, nil)
*t = *types.NewChan(dir, val)
return t
default:
errorf("unexpected type tag %d", i) // panics
panic("unreachable")
}
}
func chanDir(d int) types.ChanDir {
// tag values must match the constants in cmd/compile/internal/gc/go.go
switch d {
case 1 /* Crecv */ :
return types.RecvOnly
case 2 /* Csend */ :
return types.SendOnly
case 3 /* Cboth */ :
return types.SendRecv
default:
errorf("unexpected channel dir %d", d)
return 0
}
}
func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
if n := p.int(); n > 0 {
fields = make([]*types.Var, n)
tags = make([]string, n)
for i := range fields {
fields[i], tags[i] = p.field(parent)
}
}
return
}
func (p *importer) field(parent *types.Package) (*types.Var, string) {
pos := p.pos()
pkg, name, alias := p.fieldName(parent)
typ := p.typ(parent, nil)
tag := p.string()
anonymous := false
if name == "" {
// anonymous field - typ must be T or *T and T must be a type name
switch typ := deref(typ).(type) {
case *types.Basic: // basic types are named types
pkg = nil // objects defined in Universe scope have no package
name = typ.Name()
case *types.Named:
name = typ.Obj().Name()
default:
errorf("named base type expected")
}
anonymous = true
} else if alias {
// anonymous field: we have an explicit name because it's an alias
anonymous = true
}
return types.NewField(pos, pkg, name, typ, anonymous), tag
}
func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
if n := p.int(); n > 0 {
methods = make([]*types.Func, n)
for i := range methods {
methods[i] = p.method(parent, baseType)
}
}
return
}
func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
pos := p.pos()
pkg, name, _ := p.fieldName(parent)
// If we don't have a baseType, use a nil receiver.
// A receiver using the actual interface type (which
// we don't know yet) will be filled in when we call
// types.Interface.Complete.
var recv *types.Var
if baseType != nil {
recv = types.NewVar(token.NoPos, parent, "", baseType)
}
params, isddd := p.paramList()
result, _ := p.paramList()
sig := types.NewSignature(recv, params, result, isddd)
return types.NewFunc(pos, pkg, name, sig)
}
func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) {
name = p.string()
pkg = parent
if pkg == nil {
// use the imported package instead
pkg = p.pkgList[0]
}
if p.version == 0 && name == "_" {
// version 0 didn't export a package for _ fields
return
}
switch name {
case "":
// 1) field name matches base type name and is exported: nothing to do
case "?":
// 2) field name matches base type name and is not exported: need package
name = ""
pkg = p.pkg()
case "@":
// 3) field name doesn't match type name (alias)
name = p.string()
alias = true
fallthrough
default:
if !exported(name) {
pkg = p.pkg()
}
}
return
}
func (p *importer) paramList() (*types.Tuple, bool) {
n := p.int()
if n == 0 {
return nil, false
}
// negative length indicates unnamed parameters
named := true
if n < 0 {
n = -n
named = false
}
// n > 0
params := make([]*types.Var, n)
isddd := false
for i := range params {
params[i], isddd = p.param(named)
}
return types.NewTuple(params...), isddd
}
func (p *importer) param(named bool) (*types.Var, bool) {
t := p.typ(nil, nil)
td, isddd := t.(*dddSlice)
if isddd {
t = types.NewSlice(td.elem)
}
var pkg *types.Package
var name string
if named {
name = p.string()
if name == "" {
errorf("expected named parameter")
}
if name != "_" {
pkg = p.pkg()
}
if i := strings.Index(name, "·"); i > 0 {
name = name[:i] // cut off gc-specific parameter numbering
}
}
// read and discard compiler-specific info
p.string()
return types.NewVar(token.NoPos, pkg, name, t), isddd
}
func exported(name string) bool {
ch, _ := utf8.DecodeRuneInString(name)
return unicode.IsUpper(ch)
}
func (p *importer) value() constant.Value {
switch tag := p.tagOrIndex(); tag {
case falseTag:
return constant.MakeBool(false)
case trueTag:
return constant.MakeBool(true)
case int64Tag:
return constant.MakeInt64(p.int64())
case floatTag:
return p.float()
case complexTag:
re := p.float()
im := p.float()
return constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
case stringTag:
return constant.MakeString(p.string())
case unknownTag:
return constant.MakeUnknown()
default:
errorf("unexpected value tag %d", tag) // panics
panic("unreachable")
}
}
func (p *importer) float() constant.Value {
sign := p.int()
if sign == 0 {
return constant.MakeInt64(0)
}
exp := p.int()
mant := []byte(p.string()) // big endian
// remove leading 0's if any
for len(mant) > 0 && mant[0] == 0 {
mant = mant[1:]
}
// convert to little endian
// TODO(gri) go/constant should have a more direct conversion function
// (e.g., once it supports a big.Float based implementation)
for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 {
mant[i], mant[j] = mant[j], mant[i]
}
// adjust exponent (constant.MakeFromBytes creates an integer value,
// but mant represents the mantissa bits such that 0.5 <= mant < 1.0)
exp -= len(mant) << 3
if len(mant) > 0 {
for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 {
exp++
}
}
x := constant.MakeFromBytes(mant)
switch {
case exp < 0:
d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
x = constant.BinaryOp(x, token.QUO, d)
case exp > 0:
x = constant.Shift(x, token.SHL, uint(exp))
}
if sign < 0 {
x = constant.UnaryOp(token.SUB, x, 0)
}
return x
}
// ----------------------------------------------------------------------------
// Low-level decoders
func (p *importer) tagOrIndex() int {
if p.debugFormat {
p.marker('t')
}
return int(p.rawInt64())
}
func (p *importer) int() int {
x := p.int64()
if int64(int(x)) != x {
errorf("exported integer too large")
}
return int(x)
}
func (p *importer) int64() int64 {
if p.debugFormat {
p.marker('i')
}
return p.rawInt64()
}
func (p *importer) path() string {
if p.debugFormat {
p.marker('p')
}
// if the path was seen before, i is its index (>= 0)
// (the empty string is at index 0)
i := p.rawInt64()
if i >= 0 {
return p.pathList[i]
}
// otherwise, i is the negative path length (< 0)
a := make([]string, -i)
for n := range a {
a[n] = p.string()
}
s := strings.Join(a, "/")
p.pathList = append(p.pathList, s)
return s
}
func (p *importer) string() string {
if p.debugFormat {
p.marker('s')
}
// if the string was seen before, i is its index (>= 0)
// (the empty string is at index 0)
i := p.rawInt64()
if i >= 0 {
return p.strList[i]
}
// otherwise, i is the negative string length (< 0)
if n := int(-i); n <= cap(p.buf) {
p.buf = p.buf[:n]
} else {
p.buf = make([]byte, n)
}
for i := range p.buf {
p.buf[i] = p.rawByte()
}
s := string(p.buf)
p.strList = append(p.strList, s)
return s
}
func (p *importer) marker(want byte) {
if got := p.rawByte(); got != want {
errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
}
pos := p.read
if n := int(p.rawInt64()); n != pos {
errorf("incorrect position: got %d; want %d", n, pos)
}
}
// rawInt64 should only be used by low-level decoders.
func (p *importer) rawInt64() int64 {
i, err := binary.ReadVarint(p)
if err != nil {
errorf("read error: %v", err)
}
return i
}
// rawStringln should only be used to read the initial version string.
func (p *importer) rawStringln(b byte) string {
p.buf = p.buf[:0]
for b != '\n' {
p.buf = append(p.buf, b)
b = p.rawByte()
}
return string(p.buf)
}
// needed for binary.ReadVarint in rawInt64
func (p *importer) ReadByte() (byte, error) {
return p.rawByte(), nil
}
// rawByte is the bottleneck for reading p.data.
// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
// rawByte should only be used by low-level decoders.
func (p *importer) rawByte() byte {
b := p.data[0]
r := 1
if b == '|' {
b = p.data[1]
r = 2
switch b {
case 'S':
b = '$'
case '|':
// nothing to do
default:
errorf("unexpected escape sequence in export data")
}
}
p.data = p.data[r:]
p.read += r
return b
}
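// Editor's note: illustrative sketch, not part of the original file. It shows
// the '|'-escaping that rawByte undoes: the escaped form never contains a raw
// '$', which is reserved as the export-data terminator. The sample bytes are
// invented for this example.
func exampleUnescapeRoundTrip() bool {
	escaped := []byte("a|Sb||c") // encodes the logical bytes "a$b|c"
	p := &importer{data: escaped}
	var out []byte
	for len(p.data) > 0 {
		out = append(out, p.rawByte())
	}
	return string(out) == "a$b|c" // true
}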
// ----------------------------------------------------------------------------
// Export format
// Tags. Must be < 0.
const (
// Objects
packageTag = -(iota + 1)
constTag
typeTag
varTag
funcTag
endTag
// Types
namedTag
arrayTag
sliceTag
dddTag
structTag
pointerTag
signatureTag
interfaceTag
mapTag
chanTag
// Values
falseTag
trueTag
int64Tag
floatTag
fractionTag // not used by gc
complexTag
stringTag
nilTag // only used by gc (appears in exported inlined function bodies)
unknownTag // not used by gc (only appears in packages with errors)
// Type aliases
aliasTag
)
var predeclOnce sync.Once
var predecl []types.Type // initialized lazily
func predeclared() []types.Type {
predeclOnce.Do(func() {
// initialize lazily to be sure that all
// elements have been initialized before
predecl = []types.Type{ // basic types
types.Typ[types.Bool],
types.Typ[types.Int],
types.Typ[types.Int8],
types.Typ[types.Int16],
types.Typ[types.Int32],
types.Typ[types.Int64],
types.Typ[types.Uint],
types.Typ[types.Uint8],
types.Typ[types.Uint16],
types.Typ[types.Uint32],
types.Typ[types.Uint64],
types.Typ[types.Uintptr],
types.Typ[types.Float32],
types.Typ[types.Float64],
types.Typ[types.Complex64],
types.Typ[types.Complex128],
types.Typ[types.String],
// basic type aliases
types.Universe.Lookup("byte").Type(),
types.Universe.Lookup("rune").Type(),
// error
types.Universe.Lookup("error").Type(),
// untyped types
types.Typ[types.UntypedBool],
types.Typ[types.UntypedInt],
types.Typ[types.UntypedRune],
types.Typ[types.UntypedFloat],
types.Typ[types.UntypedComplex],
types.Typ[types.UntypedString],
types.Typ[types.UntypedNil],
// package unsafe
types.Typ[types.UnsafePointer],
// invalid type
types.Typ[types.Invalid], // only appears in packages with errors
// used internally by gc; never used by this package or in .a files
anyType{},
}
})
return predecl
}
type anyType struct{}
func (t anyType) Underlying() types.Type { return t }
func (t anyType) String() string { return "any" }
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
// This file implements FindExportData.
package gcimporter
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
)
func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
// See $GOROOT/include/ar.h.
hdr := make([]byte, 16+12+6+6+8+10+2)
_, err = io.ReadFull(r, hdr)
if err != nil {
return
}
// leave for debugging
if false {
fmt.Printf("header: %s", hdr)
}
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
size, err = strconv.Atoi(s)
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
err = fmt.Errorf("invalid archive header")
return
}
name = strings.TrimSpace(string(hdr[:16]))
return
}
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying GC-created object/archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function. The hdr result
// is the string before the export data, either "$$" or "$$B".
//
func FindExportData(r *bufio.Reader) (hdr string, err error) {
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
if string(line) == "!<arch>\n" {
// Archive file. Scan to __.PKGDEF.
var name string
if name, _, err = readGopackHeader(r); err != nil {
return
}
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
}
// Now at __.PKGDEF in archive or still at beginning of file.
// Either way, line should begin with "go object ".
if !strings.HasPrefix(string(line), "go object ") {
err = fmt.Errorf("not a Go object file")
return
}
// Skip over object header to export data.
// Begins after first line starting with $$.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
}
hdr = string(line)
return
}
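// Editor's note: illustrative usage sketch, not part of the original file.
// FindExportData expects a reader positioned at the start of an object or
// archive file; wrapping an arbitrary io.Reader in a bufio.Reader is enough.
func exampleFindExportData(r io.Reader) (hdr string, err error) {
	return FindExportData(bufio.NewReader(r))
}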
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
// but it also contains the original source-based importer code for Go1.6.
// Once we stop supporting 1.6, we can remove that code.
// Package gcimporter provides various functions for reading
// gc-generated object files that can be used to implement the
// Importer interface defined by the Go 1.5 standard library package.
package gcimporter // import "golang.org/x/tools/go/internal/gcimporter"
import (
"bufio"
"errors"
"fmt"
"go/build"
"go/constant"
"go/token"
"go/types"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"text/scanner"
)
// debugging/development support
const debug = false
var pkgExts = [...]string{".a", ".o"}
// FindPkg returns the filename and unique package id for an import
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
// If no file was found, an empty filename is returned.
//
func FindPkg(path, srcDir string) (filename, id string) {
if path == "" {
return
}
var noext string
switch {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
// Don't require the source files to be present.
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
id = path // make sure we have an id to print in error message
return
}
noext = strings.TrimSuffix(bp.PkgObj, ".a")
id = bp.ImportPath
case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
noext = filepath.Join(srcDir, path)
id = noext
case filepath.IsAbs(path):
// for completeness only - go/build.Import
// does not support absolute imports
// "/x" -> "/x.ext", "/x"
noext = path
id = path
}
if false { // for debugging
if path != id {
fmt.Printf("%s -> %s\n", path, id)
}
}
// try extensions
for _, ext := range pkgExts {
filename = noext + ext
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}
filename = "" // not found
return
}
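// Editor's note: illustrative sketch, not in the original file, of how FindPkg
// is typically used before opening the compiled package file; "fmt" is just an
// example import path.
func exampleFindPkg(srcDir string) {
	filename, id := FindPkg("fmt", srcDir)
	if filename == "" {
		fmt.Printf("no compiled package file found for %s\n", id)
		return
	}
	fmt.Printf("%s -> %s\n", id, filename)
}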
// ImportData imports a package by reading the gc-generated export data,
// adds the corresponding package object to the packages map indexed by id,
// and returns the object.
//
// The packages map must contain all packages already imported. The data
// reader position must be the beginning of the export data section. The
// filename is only used in error messages.
//
// If packages[id] contains the completely imported package, that package
// can be used directly, and there is no need to call this function (though
// there is also no harm in doing so beyond the extra time spent).
//
func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
// support for parser error handling
defer func() {
switch r := recover().(type) {
case nil:
// nothing to do
case importError:
err = r
default:
panic(r) // internal error
}
}()
var p parser
p.init(filename, id, data, packages)
pkg = p.parseExport()
return
}
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
//
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
var filename, id string
if lookup != nil {
// With custom lookup specified, assume that caller has
// converted path to a canonical import path for use in the map.
if path == "unsafe" {
return types.Unsafe, nil
}
id = path
// No need to re-import if the package was imported completely before.
if pkg = packages[id]; pkg != nil && pkg.Complete() {
return
}
f, err := lookup(path)
if err != nil {
return nil, err
}
rc = f
} else {
filename, id = FindPkg(path, srcDir)
if filename == "" {
if path == "unsafe" {
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %q", id)
}
// no need to re-import if the package was imported completely before
if pkg = packages[id]; pkg != nil && pkg.Complete() {
return
}
// open file
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
// add file name to error
err = fmt.Errorf("%s: %v", filename, err)
}
}()
rc = f
}
defer rc.Close()
var hdr string
buf := bufio.NewReader(rc)
if hdr, err = FindExportData(buf); err != nil {
return
}
switch hdr {
case "$$\n":
// Work-around if we don't have a filename; happens only if lookup != nil.
// Either way, the filename is only needed for importer error messages, so
// this is fine.
if filename == "" {
filename = path
}
return ImportData(packages, filename, id, buf)
case "$$B\n":
var data []byte
data, err = ioutil.ReadAll(buf)
if err != nil {
break
}
// TODO(gri): allow clients of go/importer to provide a FileSet.
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
_, pkg, err = IImportData(fset, packages, data[1:], id)
} else {
_, pkg, err = BImportData(fset, packages, data, id)
}
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
}
return
}
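// Editor's note: minimal usage sketch, not part of the original file. The
// packages map is shared across calls so repeated imports reuse the same
// *types.Package; passing a nil lookup falls back to FindPkg and os.Open.
func exampleImport(srcDir string) (*types.Package, error) {
	packages := make(map[string]*types.Package)
	return Import(packages, "fmt", srcDir, nil)
}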
// ----------------------------------------------------------------------------
// Parser
// TODO(gri) Imported objects don't have position information.
// Ideally use the debug table line info; alternatively
// create some fake position (or the position of the
// import). That way error messages referring to imported
// objects can print meaningful information.
// parser parses the exports inside a gc compiler-produced
// object/archive file and populates its scope with the results.
type parser struct {
scanner scanner.Scanner
tok rune // current token
lit string // literal string; only valid for Ident, Int, String tokens
id string // package id of imported package
sharedPkgs map[string]*types.Package // package id -> package object (across importer)
localPkgs map[string]*types.Package // package id -> package object (just this package)
}
func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) {
p.scanner.Init(src)
p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
p.scanner.Whitespace = 1<<'\t' | 1<<' '
p.scanner.Filename = filename // for good error messages
p.next()
p.id = id
p.sharedPkgs = packages
if debug {
// check consistency of packages map
for _, pkg := range packages {
if pkg.Name() == "" {
fmt.Printf("no package name for %s\n", pkg.Path())
}
}
}
}
func (p *parser) next() {
p.tok = p.scanner.Scan()
switch p.tok {
case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
p.lit = p.scanner.TokenText()
default:
p.lit = ""
}
if debug {
fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
}
}
func declTypeName(pkg *types.Package, name string) *types.TypeName {
scope := pkg.Scope()
if obj := scope.Lookup(name); obj != nil {
return obj.(*types.TypeName)
}
obj := types.NewTypeName(token.NoPos, pkg, name, nil)
// a named type may be referred to before the underlying type
// is known - set it up
types.NewNamed(obj, nil, nil)
scope.Insert(obj)
return obj
}
// ----------------------------------------------------------------------------
// Error handling
// Internal errors are boxed as importErrors.
type importError struct {
pos scanner.Position
err error
}
func (e importError) Error() string {
return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
}
func (p *parser) error(err interface{}) {
if s, ok := err.(string); ok {
err = errors.New(s)
}
// panic with a runtime.Error if err is not an error
panic(importError{p.scanner.Pos(), err.(error)})
}
func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Sprintf(format, args...))
}
func (p *parser) expect(tok rune) string {
lit := p.lit
if p.tok != tok {
p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
}
p.next()
return lit
}
func (p *parser) expectSpecial(tok string) {
sep := 'x' // not white space
i := 0
for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
p.next()
i++
}
if i < len(tok) {
p.errorf("expected %q, got %q", tok, tok[0:i])
}
}
func (p *parser) expectKeyword(keyword string) {
lit := p.expect(scanner.Ident)
if lit != keyword {
p.errorf("expected keyword %s, got %q", keyword, lit)
}
}
// ----------------------------------------------------------------------------
// Qualified and unqualified names
// PackageId = string_lit .
//
func (p *parser) parsePackageID() string {
id, err := strconv.Unquote(p.expect(scanner.String))
if err != nil {
p.error(err)
}
// id == "" stands for the imported package id
// (only known at time of package installation)
if id == "" {
id = p.id
}
return id
}
// PackageName = ident .
//
func (p *parser) parsePackageName() string {
return p.expect(scanner.Ident)
}
// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
func (p *parser) parseDotIdent() string {
ident := ""
if p.tok != scanner.Int {
sep := 'x' // not white space
for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
ident += p.lit
sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
p.next()
}
}
if ident == "" {
p.expect(scanner.Ident) // use expect() for error handling
}
return ident
}
// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) .
//
func (p *parser) parseQualifiedName() (id, name string) {
p.expect('@')
id = p.parsePackageID()
p.expect('.')
// Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
if p.tok == '?' {
p.next()
} else {
name = p.parseDotIdent()
}
return
}
// getPkg returns the package for a given id. If the package is
// not found, create the package and add it to the p.localPkgs
// and p.sharedPkgs maps. name is the (expected) name of the
// package. If name == "", the package name is expected to be
// set later via an import clause in the export data.
//
// id identifies a package, usually by a canonical package path like
// "encoding/json" but possibly by a non-canonical import path like
// "./json".
//
func (p *parser) getPkg(id, name string) *types.Package {
// package unsafe is not in the packages maps - handle explicitly
if id == "unsafe" {
return types.Unsafe
}
pkg := p.localPkgs[id]
if pkg == nil {
// first import of id from this package
pkg = p.sharedPkgs[id]
if pkg == nil {
// first import of id by this importer;
// add (possibly unnamed) pkg to shared packages
pkg = types.NewPackage(id, name)
p.sharedPkgs[id] = pkg
}
// add (possibly unnamed) pkg to local packages
if p.localPkgs == nil {
p.localPkgs = make(map[string]*types.Package)
}
p.localPkgs[id] = pkg
} else if name != "" {
// package exists already and we have an expected package name;
// make sure names match or set package name if necessary
if pname := pkg.Name(); pname == "" {
pkg.SetName(name)
} else if pname != name {
p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name)
}
}
return pkg
}
// parseExportedName is like parseQualifiedName, but
// the package id is resolved to an imported *types.Package.
//
func (p *parser) parseExportedName() (pkg *types.Package, name string) {
id, name := p.parseQualifiedName()
pkg = p.getPkg(id, "")
return
}
// ----------------------------------------------------------------------------
// Types
// BasicType = identifier .
//
func (p *parser) parseBasicType() types.Type {
id := p.expect(scanner.Ident)
obj := types.Universe.Lookup(id)
if obj, ok := obj.(*types.TypeName); ok {
return obj.Type()
}
p.errorf("not a basic type: %s", id)
return nil
}
// ArrayType = "[" int_lit "]" Type .
//
func (p *parser) parseArrayType(parent *types.Package) types.Type {
// "[" already consumed and lookahead known not to be "]"
lit := p.expect(scanner.Int)
p.expect(']')
elem := p.parseType(parent)
n, err := strconv.ParseInt(lit, 10, 64)
if err != nil {
p.error(err)
}
return types.NewArray(elem, n)
}
// MapType = "map" "[" Type "]" Type .
//
func (p *parser) parseMapType(parent *types.Package) types.Type {
p.expectKeyword("map")
p.expect('[')
key := p.parseType(parent)
p.expect(']')
elem := p.parseType(parent)
return types.NewMap(key, elem)
}
// Name = identifier | "?" | QualifiedName .
//
// For unqualified and anonymous names, the returned package is the parent
// package unless parent == nil, in which case the returned package is the
// package being imported. (The parent package is not nil if the name
// is an unqualified struct field or interface method name belonging to a
// type declared in another package.)
//
// For qualified names, the returned package is nil (and not created if
// it doesn't exist yet) unless materializePkg is set (which creates an
// unnamed package with valid package path). In the latter case, a
// subsequent import clause is expected to provide a name for the package.
//
func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
pkg = parent
if pkg == nil {
pkg = p.sharedPkgs[p.id]
}
switch p.tok {
case scanner.Ident:
name = p.lit
p.next()
case '?':
// anonymous
p.next()
case '@':
// exported name prefixed with package path
pkg = nil
var id string
id, name = p.parseQualifiedName()
if materializePkg {
pkg = p.getPkg(id, "")
}
default:
p.error("name expected")
}
return
}
func deref(typ types.Type) types.Type {
if p, _ := typ.(*types.Pointer); p != nil {
return p.Elem()
}
return typ
}
// Field = Name Type [ string_lit ] .
//
func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
pkg, name := p.parseName(parent, true)
if name == "_" {
// Blank fields should be package-qualified because they
// are unexported identifiers, but gc does not qualify them.
// Assuming that the ident belongs to the current package
// causes types to change during re-exporting, leading
// to spurious "can't assign A to B" errors from go/types.
// As a workaround, pretend all blank fields belong
// to the same unique dummy package.
const blankpkg = "<_>"
pkg = p.getPkg(blankpkg, blankpkg)
}
typ := p.parseType(parent)
anonymous := false
if name == "" {
// anonymous field - typ must be T or *T and T must be a type name
switch typ := deref(typ).(type) {
case *types.Basic: // basic types are named types
pkg = nil // objects defined in Universe scope have no package
name = typ.Name()
case *types.Named:
name = typ.Obj().Name()
default:
p.errorf("anonymous field expected")
}
anonymous = true
}
tag := ""
if p.tok == scanner.String {
s := p.expect(scanner.String)
var err error
tag, err = strconv.Unquote(s)
if err != nil {
p.errorf("invalid struct tag %s: %s", s, err)
}
}
return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
}
// StructType = "struct" "{" [ FieldList ] "}" .
// FieldList = Field { ";" Field } .
//
func (p *parser) parseStructType(parent *types.Package) types.Type {
var fields []*types.Var
var tags []string
p.expectKeyword("struct")
p.expect('{')
for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
if i > 0 {
p.expect(';')
}
fld, tag := p.parseField(parent)
if tag != "" && tags == nil {
tags = make([]string, i)
}
if tags != nil {
tags = append(tags, tag)
}
fields = append(fields, fld)
}
p.expect('}')
return types.NewStruct(fields, tags)
}
// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
//
func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
_, name := p.parseName(nil, false)
// remove gc-specific parameter numbering
if i := strings.Index(name, "·"); i >= 0 {
name = name[:i]
}
if p.tok == '.' {
p.expectSpecial("...")
isVariadic = true
}
typ := p.parseType(nil)
if isVariadic {
typ = types.NewSlice(typ)
}
// ignore argument tag (e.g. "noescape")
if p.tok == scanner.String {
p.next()
}
// TODO(gri) should we provide a package?
par = types.NewVar(token.NoPos, nil, name, typ)
return
}
// Parameters = "(" [ ParameterList ] ")" .
// ParameterList = { Parameter "," } Parameter .
//
func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) {
p.expect('(')
for p.tok != ')' && p.tok != scanner.EOF {
if len(list) > 0 {
p.expect(',')
}
par, variadic := p.parseParameter()
list = append(list, par)
if variadic {
if isVariadic {
p.error("... not on final argument")
}
isVariadic = true
}
}
p.expect(')')
return
}
// Signature = Parameters [ Result ] .
// Result = Type | Parameters .
//
func (p *parser) parseSignature(recv *types.Var) *types.Signature {
params, isVariadic := p.parseParameters()
// optional result type
var results []*types.Var
if p.tok == '(' {
var variadic bool
results, variadic = p.parseParameters()
if variadic {
p.error("... not permitted on result type")
}
}
return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic)
}
// InterfaceType = "interface" "{" [ MethodList ] "}" .
// MethodList = Method { ";" Method } .
// Method = Name Signature .
//
// The methods of embedded interfaces are always "inlined"
// by the compiler and thus embedded interfaces are never
// visible in the export data.
//
func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
var methods []*types.Func
p.expectKeyword("interface")
p.expect('{')
for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
if i > 0 {
p.expect(';')
}
pkg, name := p.parseName(parent, true)
sig := p.parseSignature(nil)
methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
}
p.expect('}')
// Complete requires the type's embedded interfaces to be fully defined,
// but we do not define any
return newInterface(methods, nil).Complete()
}
// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
//
func (p *parser) parseChanType(parent *types.Package) types.Type {
dir := types.SendRecv
if p.tok == scanner.Ident {
p.expectKeyword("chan")
if p.tok == '<' {
p.expectSpecial("<-")
dir = types.SendOnly
}
} else {
p.expectSpecial("<-")
p.expectKeyword("chan")
dir = types.RecvOnly
}
elem := p.parseType(parent)
return types.NewChan(dir, elem)
}
// Type =
// BasicType | TypeName | ArrayType | SliceType | StructType |
// PointerType | FuncType | InterfaceType | MapType | ChanType |
// "(" Type ")" .
//
// BasicType = ident .
// TypeName = ExportedName .
// SliceType = "[" "]" Type .
// PointerType = "*" Type .
// FuncType = "func" Signature .
//
func (p *parser) parseType(parent *types.Package) types.Type {
switch p.tok {
case scanner.Ident:
switch p.lit {
default:
return p.parseBasicType()
case "struct":
return p.parseStructType(parent)
case "func":
// FuncType
p.next()
return p.parseSignature(nil)
case "interface":
return p.parseInterfaceType(parent)
case "map":
return p.parseMapType(parent)
case "chan":
return p.parseChanType(parent)
}
case '@':
// TypeName
pkg, name := p.parseExportedName()
return declTypeName(pkg, name).Type()
case '[':
p.next() // look ahead
if p.tok == ']' {
// SliceType
p.next()
return types.NewSlice(p.parseType(parent))
}
return p.parseArrayType(parent)
case '*':
// PointerType
p.next()
return types.NewPointer(p.parseType(parent))
case '<':
return p.parseChanType(parent)
case '(':
// "(" Type ")"
p.next()
typ := p.parseType(parent)
p.expect(')')
return typ
}
p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
return nil
}
// ----------------------------------------------------------------------------
// Declarations
// ImportDecl = "import" PackageName PackageId .
//
func (p *parser) parseImportDecl() {
p.expectKeyword("import")
name := p.parsePackageName()
p.getPkg(p.parsePackageID(), name)
}
// int_lit = [ "+" | "-" ] { "0" ... "9" } .
//
func (p *parser) parseInt() string {
s := ""
switch p.tok {
case '-':
s = "-"
p.next()
case '+':
p.next()
}
return s + p.expect(scanner.Int)
}
// number = int_lit [ "p" int_lit ] .
//
func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) {
// mantissa
mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0)
if mant == nil {
panic("invalid mantissa")
}
if p.lit == "p" {
// exponent (base 2)
p.next()
exp, err := strconv.ParseInt(p.parseInt(), 10, 0)
if err != nil {
p.error(err)
}
if exp < 0 {
denom := constant.MakeInt64(1)
denom = constant.Shift(denom, token.SHL, uint(-exp))
typ = types.Typ[types.UntypedFloat]
val = constant.BinaryOp(mant, token.QUO, denom)
return
}
if exp > 0 {
mant = constant.Shift(mant, token.SHL, uint(exp))
}
typ = types.Typ[types.UntypedFloat]
val = mant
return
}
typ = types.Typ[types.UntypedInt]
val = mant
return
}
// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
// bool_lit = "true" | "false" .
// complex_lit = "(" float_lit "+" float_lit "i" ")" .
// rune_lit = "(" int_lit "+" int_lit ")" .
// string_lit = `"` { unicode_char } `"` .
//
func (p *parser) parseConstDecl() {
p.expectKeyword("const")
pkg, name := p.parseExportedName()
var typ0 types.Type
if p.tok != '=' {
// constant types are never structured - no need for parent type
typ0 = p.parseType(nil)
}
p.expect('=')
var typ types.Type
var val constant.Value
switch p.tok {
case scanner.Ident:
// bool_lit
if p.lit != "true" && p.lit != "false" {
p.error("expected true or false")
}
typ = types.Typ[types.UntypedBool]
val = constant.MakeBool(p.lit == "true")
p.next()
case '-', scanner.Int:
// int_lit
typ, val = p.parseNumber()
case '(':
// complex_lit or rune_lit
p.next()
if p.tok == scanner.Char {
p.next()
p.expect('+')
typ = types.Typ[types.UntypedRune]
_, val = p.parseNumber()
p.expect(')')
break
}
_, re := p.parseNumber()
p.expect('+')
_, im := p.parseNumber()
p.expectKeyword("i")
p.expect(')')
typ = types.Typ[types.UntypedComplex]
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
case scanner.Char:
// rune_lit
typ = types.Typ[types.UntypedRune]
val = constant.MakeFromLiteral(p.lit, token.CHAR, 0)
p.next()
case scanner.String:
// string_lit
typ = types.Typ[types.UntypedString]
val = constant.MakeFromLiteral(p.lit, token.STRING, 0)
p.next()
default:
p.errorf("expected literal got %s", scanner.TokenString(p.tok))
}
if typ0 == nil {
typ0 = typ
}
pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
}
// TypeDecl = "type" ExportedName Type .
//
func (p *parser) parseTypeDecl() {
p.expectKeyword("type")
pkg, name := p.parseExportedName()
obj := declTypeName(pkg, name)
// The type object may have been imported before and thus already
// have a type associated with it. We still need to parse the type
// structure, but throw it away if the object already has a type.
// This ensures that all imports refer to the same type object for
// a given type declaration.
typ := p.parseType(pkg)
if name := obj.Type().(*types.Named); name.Underlying() == nil {
name.SetUnderlying(typ)
}
}
// VarDecl = "var" ExportedName Type .
//
func (p *parser) parseVarDecl() {
p.expectKeyword("var")
pkg, name := p.parseExportedName()
typ := p.parseType(pkg)
pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
}
// Func = Signature [ Body ] .
// Body = "{" ... "}" .
//
func (p *parser) parseFunc(recv *types.Var) *types.Signature {
sig := p.parseSignature(recv)
if p.tok == '{' {
p.next()
for i := 1; i > 0; p.next() {
switch p.tok {
case '{':
i++
case '}':
i--
}
}
}
return sig
}
// MethodDecl = "func" Receiver Name Func .
// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
//
func (p *parser) parseMethodDecl() {
// "func" already consumed
p.expect('(')
recv, _ := p.parseParameter() // receiver
p.expect(')')
// determine receiver base type object
base := deref(recv.Type()).(*types.Named)
// parse method name, signature, and possibly inlined body
_, name := p.parseName(nil, false)
sig := p.parseFunc(recv)
// methods always belong to the same package as the base type object
pkg := base.Obj().Pkg()
// add method to type unless type was imported before
// and method exists already
// TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small.
base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
}
// FuncDecl = "func" ExportedName Func .
//
func (p *parser) parseFuncDecl() {
// "func" already consumed
pkg, name := p.parseExportedName()
typ := p.parseFunc(nil)
pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
}
// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
//
func (p *parser) parseDecl() {
if p.tok == scanner.Ident {
switch p.lit {
case "import":
p.parseImportDecl()
case "const":
p.parseConstDecl()
case "type":
p.parseTypeDecl()
case "var":
p.parseVarDecl()
case "func":
p.next() // look ahead
if p.tok == '(' {
p.parseMethodDecl()
} else {
p.parseFuncDecl()
}
}
}
p.expect('\n')
}
// ----------------------------------------------------------------------------
// Export
// Export = "PackageClause { Decl } "$$" .
// PackageClause = "package" PackageName [ "safe" ] "\n" .
//
func (p *parser) parseExport() *types.Package {
p.expectKeyword("package")
name := p.parsePackageName()
if p.tok == scanner.Ident && p.lit == "safe" {
// package was compiled with -u option - ignore
p.next()
}
p.expect('\n')
pkg := p.getPkg(p.id, name)
for p.tok != '$' && p.tok != scanner.EOF {
p.parseDecl()
}
if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
// don't call next()/expect() since reading past the
// export data may cause scanner errors (e.g. NUL chars)
p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
}
if n := p.scanner.ErrorCount; n != 0 {
p.errorf("expected no scanner errors, got %d", n)
}
// Record all locally referenced packages as imports.
var imports []*types.Package
for id, pkg2 := range p.localPkgs {
if pkg2.Name() == "" {
p.errorf("%s package has no name", id)
}
if id == p.id {
continue // avoid self-edge
}
imports = append(imports, pkg2)
}
sort.Sort(byPath(imports))
pkg.SetImports(imports)
// package was imported completely and without errors
pkg.MarkComplete()
return pkg
}
type byPath []*types.Package
func (a byPath) Len() int { return len(a) }
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Indexed binary package export.
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
// see that file for specification of the format.
package gcimporter
import (
"bytes"
"encoding/binary"
"go/ast"
"go/constant"
"go/token"
"go/types"
"io"
"math/big"
"reflect"
"sort"
)
// Current indexed export format version. Increase with each format change.
// 0: Go1.11 encoding
const iexportVersion = 0
// Current bundled export format version. Increase with each format change.
// 0: initial implementation
const bundleVersion = 0
// IExportData writes indexed export data for pkg to out.
//
// If no file set is provided, position info will be missing.
// The package path of the top-level package will not be recorded,
// so that calls to IImportData can override with a provided package path.
func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
return iexportCommon(out, fset, false, []*types.Package{pkg})
}
// IExportBundle writes an indexed export bundle for pkgs to out.
func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
return iexportCommon(out, fset, true, pkgs)
}
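// Editor's note: hypothetical usage sketch, not part of the original file; it
// assumes the caller already has a type-checked *types.Package and its
// *token.FileSet.
func exampleIExportData(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
	var buf bytes.Buffer
	if err := IExportData(&buf, fset, pkg); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}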
func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, pkgs []*types.Package) (err error) {
defer func() {
if e := recover(); e != nil {
if ierr, ok := e.(internalError); ok {
err = ierr
return
}
// Not an internal error; panic again.
panic(e)
}
}()
p := iexporter{
fset: fset,
allPkgs: map[*types.Package]bool{},
stringIndex: map[string]uint64{},
declIndex: map[types.Object]uint64{},
typIndex: map[types.Type]uint64{},
}
if !bundle {
p.localpkg = pkgs[0]
}
for i, pt := range predeclared() {
p.typIndex[pt] = uint64(i)
}
if len(p.typIndex) > predeclReserved {
panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
}
// Initialize work queue with exported declarations.
for _, pkg := range pkgs {
scope := pkg.Scope()
for _, name := range scope.Names() {
if ast.IsExported(name) {
p.pushDecl(scope.Lookup(name))
}
}
if bundle {
// Ensure pkg and its imports are included in the index.
p.allPkgs[pkg] = true
for _, imp := range pkg.Imports() {
p.allPkgs[imp] = true
}
}
}
// Loop until no more work.
for !p.declTodo.empty() {
p.doDecl(p.declTodo.popHead())
}
// Append indices to data0 section.
dataLen := uint64(p.data0.Len())
w := p.newWriter()
w.writeIndex(p.declIndex)
if bundle {
w.uint64(uint64(len(pkgs)))
for _, pkg := range pkgs {
w.pkg(pkg)
imps := pkg.Imports()
w.uint64(uint64(len(imps)))
for _, imp := range imps {
w.pkg(imp)
}
}
}
w.flush()
// Assemble header.
var hdr intWriter
if bundle {
hdr.uint64(bundleVersion)
}
hdr.uint64(iexportVersion)
hdr.uint64(uint64(p.strings.Len()))
hdr.uint64(dataLen)
// Flush output.
io.Copy(out, &hdr)
io.Copy(out, &p.strings)
io.Copy(out, &p.data0)
return nil
}
// writeIndex writes out an object index. In this copy of the exporter it is
// only called for the main index, which is also read by non-compiler tools
// and includes a complete package description (i.e., name and height).
func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
// Build a map from packages to objects from that package.
pkgObjs := map[*types.Package][]types.Object{}
// For the main index, make sure to include every package that
// we reference, even if we're not exporting (or reexporting)
// any symbols from it.
if w.p.localpkg != nil {
pkgObjs[w.p.localpkg] = nil
}
for pkg := range w.p.allPkgs {
pkgObjs[pkg] = nil
}
for obj := range index {
pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
}
var pkgs []*types.Package
for pkg, objs := range pkgObjs {
pkgs = append(pkgs, pkg)
sort.Slice(objs, func(i, j int) bool {
return objs[i].Name() < objs[j].Name()
})
}
sort.Slice(pkgs, func(i, j int) bool {
return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
})
w.uint64(uint64(len(pkgs)))
for _, pkg := range pkgs {
w.string(w.exportPath(pkg))
w.string(pkg.Name())
w.uint64(uint64(0)) // package height is not needed for go/types
objs := pkgObjs[pkg]
w.uint64(uint64(len(objs)))
for _, obj := range objs {
w.string(obj.Name())
w.uint64(index[obj])
}
}
}
type iexporter struct {
fset *token.FileSet
out *bytes.Buffer
localpkg *types.Package
// allPkgs tracks all packages that have been referenced by
// the export data, so we can ensure to include them in the
// main index.
allPkgs map[*types.Package]bool
declTodo objQueue
strings intWriter
stringIndex map[string]uint64
data0 intWriter
declIndex map[types.Object]uint64
typIndex map[types.Type]uint64
}
// stringOff returns the offset of s within the string section.
// If not already present, it's added to the end.
func (p *iexporter) stringOff(s string) uint64 {
off, ok := p.stringIndex[s]
if !ok {
off = uint64(p.strings.Len())
p.stringIndex[s] = off
p.strings.uint64(uint64(len(s)))
p.strings.WriteString(s)
}
return off
}
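// Editor's note: illustrative sketch, not in the original file, of the dedup
// done by stringOff: the second request for the same string hits stringIndex
// and returns the offset recorded for the first one. It assumes p was
// initialized as in iexportCommon (non-nil stringIndex).
func exampleStringOff(p *iexporter) bool {
	first := p.stringOff("hello")
	second := p.stringOff("hello")
	return first == second // always true: the string section stores "hello" once
}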
// pushDecl adds n to the declaration work queue, if not already present.
func (p *iexporter) pushDecl(obj types.Object) {
// Package unsafe is known to the compiler and predeclared.
assert(obj.Pkg() != types.Unsafe)
if _, ok := p.declIndex[obj]; ok {
return
}
p.declIndex[obj] = ^uint64(0) // mark n present in work queue
p.declTodo.pushTail(obj)
}
// exportWriter handles writing out individual data section chunks.
type exportWriter struct {
p *iexporter
data intWriter
currPkg *types.Package
prevFile string
prevLine int64
}
func (w *exportWriter) exportPath(pkg *types.Package) string {
if pkg == w.p.localpkg {
return ""
}
return pkg.Path()
}
func (p *iexporter) doDecl(obj types.Object) {
w := p.newWriter()
w.setPkg(obj.Pkg(), false)
switch obj := obj.(type) {
case *types.Var:
w.tag('V')
w.pos(obj.Pos())
w.typ(obj.Type(), obj.Pkg())
case *types.Func:
sig, _ := obj.Type().(*types.Signature)
if sig.Recv() != nil {
panic(internalErrorf("unexpected method: %v", sig))
}
w.tag('F')
w.pos(obj.Pos())
w.signature(sig)
case *types.Const:
w.tag('C')
w.pos(obj.Pos())
w.value(obj.Type(), obj.Val())
case *types.TypeName:
if obj.IsAlias() {
w.tag('A')
w.pos(obj.Pos())
w.typ(obj.Type(), obj.Pkg())
break
}
// Defined type.
w.tag('T')
w.pos(obj.Pos())
underlying := obj.Type().Underlying()
w.typ(underlying, obj.Pkg())
t := obj.Type()
if types.IsInterface(t) {
break
}
named, ok := t.(*types.Named)
if !ok {
panic(internalErrorf("%s is not a defined type", t))
}
n := named.NumMethods()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
m := named.Method(i)
w.pos(m.Pos())
w.string(m.Name())
sig, _ := m.Type().(*types.Signature)
w.param(sig.Recv())
w.signature(sig)
}
default:
panic(internalErrorf("unexpected object: %v", obj))
}
p.declIndex[obj] = w.flush()
}
func (w *exportWriter) tag(tag byte) {
w.data.WriteByte(tag)
}
func (w *exportWriter) pos(pos token.Pos) {
if w.p.fset == nil {
w.int64(0)
return
}
p := w.p.fset.Position(pos)
file := p.Filename
line := int64(p.Line)
// When file is the same as the last position (common case),
// we can save a few bytes by delta encoding just the line
// number.
//
// Note: Because data objects may be read out of order (or not
// at all), we can only apply delta encoding within a single
// object. This is handled implicitly by tracking prevFile and
// prevLine as fields of exportWriter.
if file == w.prevFile {
delta := line - w.prevLine
w.int64(delta)
if delta == deltaNewFile {
w.int64(-1)
}
} else {
w.int64(deltaNewFile)
w.int64(line) // line >= 0
w.string(file)
w.prevFile = file
}
w.prevLine = line
}
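// Editor's note: a compact restatement, not in the original file, of the
// branch above: same file means only the line delta is emitted (and an extra
// -1 if that delta happens to equal deltaNewFile); a new file is signalled by
// the deltaNewFile marker followed by the absolute line and the file name.
func examplePosDelta(prevFile, file string, prevLine, line int64) (sameFile bool, delta int64) {
	if file == prevFile {
		return true, line - prevLine // usually a small varint
	}
	return false, deltaNewFile // then int64(line) and string(file) follow
}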
func (w *exportWriter) pkg(pkg *types.Package) {
// Ensure any referenced packages are declared in the main index.
w.p.allPkgs[pkg] = true
w.string(w.exportPath(pkg))
}
func (w *exportWriter) qualifiedIdent(obj types.Object) {
// Ensure any referenced declarations are written out too.
w.p.pushDecl(obj)
w.string(obj.Name())
w.pkg(obj.Pkg())
}
func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
w.data.uint64(w.p.typOff(t, pkg))
}
func (p *iexporter) newWriter() *exportWriter {
return &exportWriter{p: p}
}
func (w *exportWriter) flush() uint64 {
off := uint64(w.p.data0.Len())
io.Copy(&w.p.data0, &w.data)
return off
}
func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
off, ok := p.typIndex[t]
if !ok {
w := p.newWriter()
w.doTyp(t, pkg)
off = predeclReserved + w.flush()
p.typIndex[t] = off
}
return off
}
func (w *exportWriter) startType(k itag) {
w.data.uint64(uint64(k))
}
func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
switch t := t.(type) {
case *types.Named:
w.startType(definedType)
w.qualifiedIdent(t.Obj())
case *types.Pointer:
w.startType(pointerType)
w.typ(t.Elem(), pkg)
case *types.Slice:
w.startType(sliceType)
w.typ(t.Elem(), pkg)
case *types.Array:
w.startType(arrayType)
w.uint64(uint64(t.Len()))
w.typ(t.Elem(), pkg)
case *types.Chan:
w.startType(chanType)
// 1 RecvOnly; 2 SendOnly; 3 SendRecv
var dir uint64
switch t.Dir() {
case types.RecvOnly:
dir = 1
case types.SendOnly:
dir = 2
case types.SendRecv:
dir = 3
}
w.uint64(dir)
w.typ(t.Elem(), pkg)
case *types.Map:
w.startType(mapType)
w.typ(t.Key(), pkg)
w.typ(t.Elem(), pkg)
case *types.Signature:
w.startType(signatureType)
w.setPkg(pkg, true)
w.signature(t)
case *types.Struct:
w.startType(structType)
w.setPkg(pkg, true)
n := t.NumFields()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
f := t.Field(i)
w.pos(f.Pos())
w.string(f.Name())
w.typ(f.Type(), pkg)
w.bool(f.Anonymous())
w.string(t.Tag(i)) // note (or tag)
}
case *types.Interface:
w.startType(interfaceType)
w.setPkg(pkg, true)
n := t.NumEmbeddeds()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
f := t.Embedded(i)
w.pos(f.Obj().Pos())
w.typ(f.Obj().Type(), f.Obj().Pkg())
}
n = t.NumExplicitMethods()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
m := t.ExplicitMethod(i)
w.pos(m.Pos())
w.string(m.Name())
sig, _ := m.Type().(*types.Signature)
w.signature(sig)
}
default:
panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
}
}
func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
if write {
w.pkg(pkg)
}
w.currPkg = pkg
}
func (w *exportWriter) signature(sig *types.Signature) {
w.paramList(sig.Params())
w.paramList(sig.Results())
if sig.Params().Len() > 0 {
w.bool(sig.Variadic())
}
}
func (w *exportWriter) paramList(tup *types.Tuple) {
n := tup.Len()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
w.param(tup.At(i))
}
}
func (w *exportWriter) param(obj types.Object) {
w.pos(obj.Pos())
w.localIdent(obj)
w.typ(obj.Type(), obj.Pkg())
}
func (w *exportWriter) value(typ types.Type, v constant.Value) {
w.typ(typ, nil)
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
case types.IsBoolean:
w.bool(constant.BoolVal(v))
case types.IsInteger:
var i big.Int
if i64, exact := constant.Int64Val(v); exact {
i.SetInt64(i64)
} else if ui64, exact := constant.Uint64Val(v); exact {
i.SetUint64(ui64)
} else {
i.SetString(v.ExactString(), 10)
}
w.mpint(&i, typ)
case types.IsFloat:
f := constantToFloat(v)
w.mpfloat(f, typ)
case types.IsComplex:
w.mpfloat(constantToFloat(constant.Real(v)), typ)
w.mpfloat(constantToFloat(constant.Imag(v)), typ)
case types.IsString:
w.string(constant.StringVal(v))
default:
if b.Kind() == types.Invalid {
// package contains type errors
break
}
panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying()))
}
}
// constantToFloat converts a constant.Value with kind constant.Float to a
// big.Float.
func constantToFloat(x constant.Value) *big.Float {
x = constant.ToFloat(x)
// Use the same floating-point precision (512) as cmd/compile
// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
const mpprec = 512
var f big.Float
f.SetPrec(mpprec)
if v, exact := constant.Float64Val(x); exact {
// float64
f.SetFloat64(v)
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
// TODO(gri): add big.Rat accessor to constant.Value.
n := valueToRat(num)
d := valueToRat(denom)
f.SetRat(n.Quo(n, d))
} else {
// Value too large to represent as a fraction => inaccessible.
// TODO(gri): add big.Float accessor to constant.Value.
_, ok := f.SetString(x.ExactString())
assert(ok)
}
return &f
}
// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
// byte. Larger values are written out as a length-prefixed big-endian
// byte string, where the length prefix is encoded as its complement.
// For example, bytes 0, 1, and 2 directly represent the integer
// values 0, 1, and 2; while bytes 255, 254, and 253 indicate that a 1-,
// 2-, or 3-byte big-endian string follows.
//
// Encoding for signed types uses the same general approach as for
// unsigned types, except small values use zig-zag encoding and the
// bottom bit of the length prefix byte for large values is reserved as a
// sign bit.
//
// The exact boundary between small and large encodings varies
// according to the maximum number of bytes needed to encode a value
// of type typ. As a special case, 8-bit types are always encoded as a
// single byte.
//
// TODO(mdempsky): Is this level of complexity really worthwhile?
func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
basic, ok := typ.Underlying().(*types.Basic)
if !ok {
panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
}
signed, maxBytes := intSize(basic)
negative := x.Sign() < 0
if !signed && negative {
panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
}
b := x.Bytes()
if len(b) > 0 && b[0] == 0 {
panic(internalErrorf("leading zeros"))
}
if uint(len(b)) > maxBytes {
panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
}
maxSmall := 256 - maxBytes
if signed {
maxSmall = 256 - 2*maxBytes
}
if maxBytes == 1 {
maxSmall = 256
}
// Check if x can use small value encoding.
if len(b) <= 1 {
var ux uint
if len(b) == 1 {
ux = uint(b[0])
}
if signed {
ux <<= 1
if negative {
ux--
}
}
if ux < maxSmall {
w.data.WriteByte(byte(ux))
return
}
}
n := 256 - uint(len(b))
if signed {
n = 256 - 2*uint(len(b))
if negative {
n |= 1
}
}
if n < maxSmall || n >= 256 {
panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
}
w.data.WriteByte(byte(n))
w.data.Write(b)
}
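// Editor's note: illustrative sketch, not in the original file. For a signed
// type with maxBytes = 8 (e.g. int64), zig-zag encoded magnitudes below
// 256-2*8 = 240 fit in a single byte; anything larger is written as a
// length-prefix byte followed by the big-endian magnitude bytes.
func exampleMpintBoundaries() (signedMaxSmall, unsignedMaxSmall uint) {
	const maxBytes = 8
	return 256 - 2*maxBytes, 256 - maxBytes // 240 and 248
}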
// mpfloat exports a multi-precision floating point number.
//
// The number's value is decomposed into mantissa × 2**exponent, where
// mantissa is an integer. The value is written out as mantissa (as a
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
if f.IsInf() {
panic("infinite constant")
}
// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
var mant big.Float
exp := int64(f.MantExp(&mant))
// Scale so that mant is an integer.
prec := mant.MinPrec()
mant.SetMantExp(&mant, int(prec))
exp -= int64(prec)
manti, acc := mant.Int(nil)
if acc != big.Exact {
panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
}
w.mpint(manti, typ)
if manti.Sign() != 0 {
w.int64(exp)
}
}
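// Worked example (editorial note, not part of the upstream file): for the
// constant 6.25, MantExp yields mant = 0.78125 and exp = 3. MinPrec(mant) is
// 5 bits, so the mantissa is rescaled to the integer 25 and the exponent
// becomes 3-5 = -2; the writer then emits mpint(25) followed by int64(-2),
// i.e. 6.25 = 25 * 2**-2.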
func (w *exportWriter) bool(b bool) bool {
var x uint64
if b {
x = 1
}
w.uint64(x)
return b
}
func (w *exportWriter) int64(x int64) { w.data.int64(x) }
func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
func (w *exportWriter) localIdent(obj types.Object) {
// Anonymous parameters.
if obj == nil {
w.string("")
return
}
name := obj.Name()
if name == "_" {
w.string("_")
return
}
w.string(name)
}
type intWriter struct {
bytes.Buffer
}
func (w *intWriter) int64(x int64) {
var buf [binary.MaxVarintLen64]byte
n := binary.PutVarint(buf[:], x)
w.Write(buf[:n])
}
func (w *intWriter) uint64(x uint64) {
var buf [binary.MaxVarintLen64]byte
n := binary.PutUvarint(buf[:], x)
w.Write(buf[:n])
}
func assert(cond bool) {
if !cond {
panic("internal error: assertion failed")
}
}
// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
// a ready-to-use empty queue.
type objQueue struct {
ring []types.Object
head, tail int
}
// empty returns true if q contains no objects.
func (q *objQueue) empty() bool {
return q.head == q.tail
}
// pushTail appends n to the tail of the queue.
func (q *objQueue) pushTail(obj types.Object) {
if len(q.ring) == 0 {
q.ring = make([]types.Object, 16)
} else if q.head+len(q.ring) == q.tail {
// Grow the ring.
nring := make([]types.Object, len(q.ring)*2)
// Copy the old elements.
part := q.ring[q.head%len(q.ring):]
if q.tail-q.head <= len(part) {
part = part[:q.tail-q.head]
copy(nring, part)
} else {
pos := copy(nring, part)
copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
}
q.ring, q.head, q.tail = nring, 0, q.tail-q.head
}
q.ring[q.tail%len(q.ring)] = obj
q.tail++
}
// popHead pops an object from the head of the queue. It panics if q is empty.
func (q *objQueue) popHead() types.Object {
if q.empty() {
panic("dequeue empty")
}
obj := q.ring[q.head%len(q.ring)]
q.head++
return obj
}
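// Usage sketch (editorial note, not part of the upstream file); obj1 and obj2
// stand in for arbitrary types.Object values held by the caller:
//
//	var q objQueue
//	q.pushTail(obj1)
//	q.pushTail(obj2)
//	for !q.empty() {
//		obj := q.popHead() // yields obj1, then obj2 (FIFO order)
//		_ = obj
//	}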
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Indexed package import.
// See cmd/compile/internal/gc/iexport.go for the export data format.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
package gcimporter
import (
"bytes"
"encoding/binary"
"fmt"
"go/constant"
"go/token"
"go/types"
"io"
"sort"
)
type intReader struct {
*bytes.Reader
path string
}
func (r *intReader) int64() int64 {
i, err := binary.ReadVarint(r.Reader)
if err != nil {
errorf("import %q: read varint error: %v", r.path, err)
}
return i
}
func (r *intReader) uint64() uint64 {
i, err := binary.ReadUvarint(r.Reader)
if err != nil {
errorf("import %q: read varint error: %v", r.path, err)
}
return i
}
const predeclReserved = 32
type itag uint64
const (
// Types
definedType itag = iota
pointerType
sliceType
arrayType
chanType
mapType
signatureType
structType
interfaceType
)
// IImportData imports a package from the serialized package data
// and returns 0 and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
pkgs, err := iimportCommon(fset, imports, data, false, path)
if err != nil {
return 0, nil, err
}
return 0, pkgs[0], nil
}
// IImportBundle imports a set of packages from the serialized package bundle.
func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
return iimportCommon(fset, imports, data, true, "")
}
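// Usage sketch (editorial note, not part of the upstream file): a typical
// call site for IImportData. exportData and the package path are assumed
// placeholders supplied by the caller.
//
//	fset := token.NewFileSet()
//	imports := make(map[string]*types.Package)
//	_, pkg, err := IImportData(fset, imports, exportData, "example.com/somepkg")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(pkg.Path(), pkg.Complete())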
func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) {
const currentVersion = 1
version := int64(-1)
defer func() {
if e := recover(); e != nil {
if version > currentVersion {
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
} else {
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
}
}
}()
r := &intReader{bytes.NewReader(data), path}
if bundle {
bundleVersion := r.uint64()
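// NOTE (editorial, not part of the upstream file): the case below compares
// the freshly read local bundleVersion against itself, so it always matches
// and the default branch is unreachable; presumably the intent was to check
// the value against the fixed bundle format version written by the exporter.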
switch bundleVersion {
case bundleVersion:
default:
errorf("unknown bundle format version %d", bundleVersion)
}
}
version = int64(r.uint64())
switch version {
case currentVersion, 0:
default:
errorf("unknown iexport format version %d", version)
}
sLen := int64(r.uint64())
dLen := int64(r.uint64())
whence, _ := r.Seek(0, io.SeekCurrent)
stringData := data[whence : whence+sLen]
declData := data[whence+sLen : whence+sLen+dLen]
r.Seek(sLen+dLen, io.SeekCurrent)
p := iimporter{
ipath: path,
version: int(version),
stringData: stringData,
stringCache: make(map[uint64]string),
pkgCache: make(map[uint64]*types.Package),
declData: declData,
pkgIndex: make(map[*types.Package]map[string]uint64),
typCache: make(map[uint64]types.Type),
fake: fakeFileSet{
fset: fset,
files: make(map[string]*token.File),
},
}
for i, pt := range predeclared() {
p.typCache[uint64(i)] = pt
}
pkgList := make([]*types.Package, r.uint64())
for i := range pkgList {
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
_ = r.uint64() // package height; unused by go/types
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
pkg = types.NewPackage(pkgPath, pkgName)
imports[pkgPath] = pkg
} else if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}
p.pkgCache[pkgPathOff] = pkg
nameIndex := make(map[string]uint64)
for nSyms := r.uint64(); nSyms > 0; nSyms-- {
name := p.stringAt(r.uint64())
nameIndex[name] = r.uint64()
}
p.pkgIndex[pkg] = nameIndex
pkgList[i] = pkg
}
if bundle {
pkgs = make([]*types.Package, r.uint64())
for i := range pkgs {
pkg := p.pkgAt(r.uint64())
imps := make([]*types.Package, r.uint64())
for j := range imps {
imps[j] = p.pkgAt(r.uint64())
}
pkg.SetImports(imps)
pkgs[i] = pkg
}
} else {
if len(pkgList) == 0 {
errorf("no packages found for %s", path)
panic("unreachable")
}
pkgs = pkgList[:1]
// record all referenced packages as imports
list := append(([]*types.Package)(nil), pkgList[1:]...)
sort.Sort(byPath(list))
pkgs[0].SetImports(list)
}
for _, pkg := range pkgs {
if pkg.Complete() {
continue
}
names := make([]string, 0, len(p.pkgIndex[pkg]))
for name := range p.pkgIndex[pkg] {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
p.doDecl(pkg, name)
}
// package was imported completely and without errors
pkg.MarkComplete()
}
for _, typ := range p.interfaceList {
typ.Complete()
}
return pkgs, nil
}
type iimporter struct {
ipath string
version int
stringData []byte
stringCache map[uint64]string
pkgCache map[uint64]*types.Package
declData []byte
pkgIndex map[*types.Package]map[string]uint64
typCache map[uint64]types.Type
fake fakeFileSet
interfaceList []*types.Interface
}
func (p *iimporter) doDecl(pkg *types.Package, name string) {
// See if we've already imported this declaration.
if obj := pkg.Scope().Lookup(name); obj != nil {
return
}
off, ok := p.pkgIndex[pkg][name]
if !ok {
errorf("%v.%v not in index", pkg, name)
}
r := &importReader{p: p, currPkg: pkg}
r.declReader.Reset(p.declData[off:])
r.obj(name)
}
func (p *iimporter) stringAt(off uint64) string {
if s, ok := p.stringCache[off]; ok {
return s
}
slen, n := binary.Uvarint(p.stringData[off:])
if n <= 0 {
errorf("varint failed")
}
spos := off + uint64(n)
s := string(p.stringData[spos : spos+slen])
p.stringCache[off] = s
return s
}
func (p *iimporter) pkgAt(off uint64) *types.Package {
if pkg, ok := p.pkgCache[off]; ok {
return pkg
}
path := p.stringAt(off)
errorf("missing package %q in %q", path, p.ipath)
return nil
}
func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
return t
}
if off < predeclReserved {
errorf("predeclared type missing from cache: %v", off)
}
r := &importReader{p: p}
r.declReader.Reset(p.declData[off-predeclReserved:])
t := r.doType(base)
if base == nil || !isInterface(t) {
p.typCache[off] = t
}
return t
}
type importReader struct {
p *iimporter
declReader bytes.Reader
currPkg *types.Package
prevFile string
prevLine int64
prevColumn int64
}
func (r *importReader) obj(name string) {
tag := r.byte()
pos := r.pos()
switch tag {
case 'A':
typ := r.typ()
r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
case 'C':
typ, val := r.value()
r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
sig := r.signature(nil)
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
case 'T':
// Types can be recursive. We need to set up a stub
// declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
named := types.NewNamed(obj, nil, nil)
r.declare(obj)
underlying := r.p.typAt(r.uint64(), named).Underlying()
named.SetUnderlying(underlying)
if !isInterface(underlying) {
for n := r.uint64(); n > 0; n-- {
mpos := r.pos()
mname := r.ident()
recv := r.param()
msig := r.signature(recv)
named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
}
}
case 'V':
typ := r.typ()
r.declare(types.NewVar(pos, r.currPkg, name, typ))
default:
errorf("unexpected tag: %v", tag)
}
}
func (r *importReader) declare(obj types.Object) {
obj.Pkg().Scope().Insert(obj)
}
func (r *importReader) value() (typ types.Type, val constant.Value) {
typ = r.typ()
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
case types.IsBoolean:
val = constant.MakeBool(r.bool())
case types.IsString:
val = constant.MakeString(r.string())
case types.IsInteger:
val = r.mpint(b)
case types.IsFloat:
val = r.mpfloat(b)
case types.IsComplex:
re := r.mpfloat(b)
im := r.mpfloat(b)
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
default:
if b.Kind() == types.Invalid {
val = constant.MakeUnknown()
return
}
errorf("unexpected type %v", typ) // panics
panic("unreachable")
}
return
}
func intSize(b *types.Basic) (signed bool, maxBytes uint) {
if (b.Info() & types.IsUntyped) != 0 {
return true, 64
}
switch b.Kind() {
case types.Float32, types.Complex64:
return true, 3
case types.Float64, types.Complex128:
return true, 7
}
signed = (b.Info() & types.IsUnsigned) == 0
switch b.Kind() {
case types.Int8, types.Uint8:
maxBytes = 1
case types.Int16, types.Uint16:
maxBytes = 2
case types.Int32, types.Uint32:
maxBytes = 4
default:
maxBytes = 8
}
return
}
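// Note (editorial, not part of the upstream file): the float/complex cases
// above return the byte budget of the mantissa rather than of the whole
// value: a float32 mantissa has 24 bits (3 bytes) and a float64 mantissa has
// 53 bits (rounded up to 7 bytes), which is what mpint/mpfloat need when
// encoding the scaled integer mantissa of a floating-point constant.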
func (r *importReader) mpint(b *types.Basic) constant.Value {
signed, maxBytes := intSize(b)
maxSmall := 256 - maxBytes
if signed {
maxSmall = 256 - 2*maxBytes
}
if maxBytes == 1 {
maxSmall = 256
}
n, _ := r.declReader.ReadByte()
if uint(n) < maxSmall {
v := int64(n)
if signed {
v >>= 1
if n&1 != 0 {
v = ^v
}
}
return constant.MakeInt64(v)
}
v := -n
if signed {
v = -(n &^ 1) >> 1
}
if v < 1 || uint(v) > maxBytes {
errorf("weird decoding: %v, %v => %v", n, signed, v)
}
buf := make([]byte, v)
io.ReadFull(&r.declReader, buf)
// convert to little endian
// TODO(gri) go/constant should have a more direct conversion function
// (e.g., once it supports a big.Float based implementation)
for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
buf[i], buf[j] = buf[j], buf[i]
}
x := constant.MakeFromBytes(buf)
if signed && n&1 != 0 {
x = constant.UnaryOp(token.SUB, x, 0)
}
return x
}
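// Worked example (editorial note, not part of the upstream file), mirroring
// the exporter and assuming an int32 constant (signed, maxSmall = 248): the
// single byte 0x06 decodes to 3 and 0x05 decodes to -3, while the prefix byte
// 0xFA (250 = 256-2*3) announces a 3-byte big-endian payload, so the bytes
// 0xFA 0x01 0x11 0x70 decode to 70000.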
func (r *importReader) mpfloat(b *types.Basic) constant.Value {
x := r.mpint(b)
if constant.Sign(x) == 0 {
return x
}
exp := r.int64()
switch {
case exp > 0:
x = constant.Shift(x, token.SHL, uint(exp))
// Ensure that the imported Kind is Float, else this constant may run into
// bitsize limits on overlarge integers. Eventually we can instead adopt
// the approach of CL 288632, but that CL relies on go/constant APIs that
// were introduced in go1.13.
//
// TODO(rFindley): sync the logic here with tip Go once we no longer
// support go1.12.
x = constant.ToFloat(x)
case exp < 0:
d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
x = constant.BinaryOp(x, token.QUO, d)
}
return x
}
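// Worked example (editorial note, not part of the upstream file): continuing
// the 6.25 example, mpint yields 25 and the following varint yields exp = -2,
// so the constant is reconstructed as 25 / 2**2 = 6.25.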
func (r *importReader) ident() string {
return r.string()
}
func (r *importReader) qualifiedIdent() (*types.Package, string) {
name := r.string()
pkg := r.pkg()
return pkg, name
}
func (r *importReader) pos() token.Pos {
if r.p.version >= 1 {
r.posv1()
} else {
r.posv0()
}
if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
return token.NoPos
}
return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
}
func (r *importReader) posv0() {
delta := r.int64()
if delta != deltaNewFile {
r.prevLine += delta
} else if l := r.int64(); l == -1 {
r.prevLine += deltaNewFile
} else {
r.prevFile = r.string()
r.prevLine = l
}
}
func (r *importReader) posv1() {
delta := r.int64()
r.prevColumn += delta >> 1
if delta&1 != 0 {
delta = r.int64()
r.prevLine += delta >> 1
if delta&1 != 0 {
r.prevFile = r.string()
}
}
}
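// Worked example (editorial note, not part of the upstream file): in the v1
// position encoding the low bit of each varint says whether the next, less
// frequently changing field follows. If only the column moved by +4, a single
// varint 8 (4<<1) is read; if the line also moved by +2 within the same file,
// the varints 9 (4<<1|1) and 4 (2<<1) are read; a file change additionally
// sets the low bit of the line delta and is followed by the new file name.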
func (r *importReader) typ() types.Type {
return r.p.typAt(r.uint64(), nil)
}
func isInterface(t types.Type) bool {
_, ok := t.(*types.Interface)
return ok
}
func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) doType(base *types.Named) types.Type {
switch k := r.kind(); k {
default:
errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
return nil
case definedType:
pkg, name := r.qualifiedIdent()
r.p.doDecl(pkg, name)
return pkg.Scope().Lookup(name).(*types.TypeName).Type()
case pointerType:
return types.NewPointer(r.typ())
case sliceType:
return types.NewSlice(r.typ())
case arrayType:
n := r.uint64()
return types.NewArray(r.typ(), int64(n))
case chanType:
dir := chanDir(int(r.uint64()))
return types.NewChan(dir, r.typ())
case mapType:
return types.NewMap(r.typ(), r.typ())
case signatureType:
r.currPkg = r.pkg()
return r.signature(nil)
case structType:
r.currPkg = r.pkg()
fields := make([]*types.Var, r.uint64())
tags := make([]string, len(fields))
for i := range fields {
fpos := r.pos()
fname := r.ident()
ftyp := r.typ()
emb := r.bool()
tag := r.string()
fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
tags[i] = tag
}
return types.NewStruct(fields, tags)
case interfaceType:
r.currPkg = r.pkg()
embeddeds := make([]types.Type, r.uint64())
for i := range embeddeds {
_ = r.pos()
embeddeds[i] = r.typ()
}
methods := make([]*types.Func, r.uint64())
for i := range methods {
mpos := r.pos()
mname := r.ident()
// TODO(mdempsky): Matches bimport.go, but I
// don't agree with this.
var recv *types.Var
if base != nil {
recv = types.NewVar(token.NoPos, r.currPkg, "", base)
}
msig := r.signature(recv)
methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
}
typ := newInterface(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
}
}
func (r *importReader) kind() itag {
return itag(r.uint64())
}
func (r *importReader) signature(recv *types.Var) *types.Signature {
params := r.paramList()
results := r.paramList()
variadic := params.Len() > 0 && r.bool()
return types.NewSignature(recv, params, results, variadic)
}
func (r *importReader) paramList() *types.Tuple {
xs := make([]*types.Var, r.uint64())
for i := range xs {
xs[i] = r.param()
}
return types.NewTuple(xs...)
}
func (r *importReader) param() *types.Var {
pos := r.pos()
name := r.ident()
typ := r.typ()
return types.NewParam(pos, r.currPkg, name, typ)
}
func (r *importReader) bool() bool {
return r.uint64() != 0
}
func (r *importReader) int64() int64 {
n, err := binary.ReadVarint(&r.declReader)
if err != nil {
errorf("readVarint: %v", err)
}
return n
}
func (r *importReader) uint64() uint64 {
n, err := binary.ReadUvarint(&r.declReader)
if err != nil {
errorf("readUvarint: %v", err)
}
return n
}
func (r *importReader) byte() byte {
x, err := r.declReader.ReadByte()
if err != nil {
errorf("declReader.ReadByte: %v", err)
}
return x
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.11
// +build !go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
named := make([]*types.Named, len(embeddeds))
for i, e := range embeddeds {
var ok bool
named[i], ok = e.(*types.Named)
if !ok {
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
}
}
return types.NewInterface(methods, named)
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.11
// +build go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
return types.NewInterfaceType(methods, embeddeds)
}