Commit bde98244 authored by ale

Update vendor deps

parent cb57aefa
Pipeline #829 passed with stages in 1 minute and 22 seconds
// Copyright (c) 2015 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
// +build ignore

package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"unicode"
)
var url = flag.String("url",
"http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/",
"URL of Unicode database directory")
var verbose = flag.Bool("verbose",
false,
"write data to stdout as it is parsed")
var localFiles = flag.Bool("local",
false,
"data files have been copied to the current directory; for debugging only")
var outputFile = flag.String("output",
"",
"output file for generated tables; default stdout")
var output *bufio.Writer
func main() {
flag.Parse()
setupOutput()
graphemeTests := make([]test, 0)
graphemeComments := make([]string, 0)
graphemeTests, graphemeComments = loadUnicodeData("GraphemeBreakTest.txt", graphemeTests, graphemeComments)
wordTests := make([]test, 0)
wordComments := make([]string, 0)
wordTests, wordComments = loadUnicodeData("WordBreakTest.txt", wordTests, wordComments)
sentenceTests := make([]test, 0)
sentenceComments := make([]string, 0)
sentenceTests, sentenceComments = loadUnicodeData("SentenceBreakTest.txt", sentenceTests, sentenceComments)
fmt.Fprintf(output, fileHeader, *url)
generateTestTables("Grapheme", graphemeTests, graphemeComments)
generateTestTables("Word", wordTests, wordComments)
generateTestTables("Sentence", sentenceTests, sentenceComments)
flushOutput()
}
// WordBreakProperty.txt has the form:
// 05F0..05F2 ; Hebrew_Letter # Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW LIGATURE YIDDISH DOUBLE YOD
// FB1D ; Hebrew_Letter # Lo HEBREW LETTER YOD WITH HIRIQ
func openReader(file string) (input io.ReadCloser) {
if *localFiles {
f, err := os.Open(file)
if err != nil {
log.Fatal(err)
}
input = f
} else {
path := *url + file
resp, err := http.Get(path)
if err != nil {
log.Fatal(err)
}
if resp.StatusCode != 200 {
log.Fatal("bad GET status for "+file, resp.Status)
}
input = resp.Body
}
return
}
func loadUnicodeData(filename string, tests []test, comments []string) ([]test, []string) {
f := openReader(filename)
defer f.Close()
bufioReader := bufio.NewReader(f)
line, err := bufioReader.ReadString('\n')
for err == nil {
tests, comments = parseLine(line, tests, comments)
line, err = bufioReader.ReadString('\n')
}
// if the err was EOF, we still need to process the last value
if err == io.EOF {
tests, comments = parseLine(line, tests, comments)
}
return tests, comments
}
const comment = "#"
const brk = "÷"
const nbrk = "×"
type test [][]byte
func parseLine(line string, tests []test, comments []string) ([]test, []string) {
if strings.HasPrefix(line, comment) {
return tests, comments
}
line = strings.TrimSpace(line)
if len(line) == 0 {
return tests, comments
}
commentStart := strings.Index(line, comment)
comment := strings.TrimSpace(line[commentStart+1:])
if commentStart > 0 {
line = line[0:commentStart]
}
pieces := strings.Split(line, brk)
t := make(test, 0)
for _, piece := range pieces {
piece = strings.TrimSpace(piece)
if len(piece) > 0 {
codePoints := strings.Split(piece, nbrk)
word := ""
for _, codePoint := range codePoints {
codePoint = strings.TrimSpace(codePoint)
r, err := strconv.ParseInt(codePoint, 16, 64)
if err != nil {
log.Printf("err: %v for '%s'", err, codePoint)
return tests, comments
}
word += string(r)
}
t = append(t, []byte(word))
}
}
tests = append(tests, t)
comments = append(comments, comment)
return tests, comments
}
func generateTestTables(prefix string, tests []test, comments []string) {
fmt.Fprintf(output, testHeader, prefix)
for i, t := range tests {
fmt.Fprintf(output, "\t\t{\n")
fmt.Fprintf(output, "\t\t\tinput: %#v,\n", bytes.Join(t, []byte{}))
fmt.Fprintf(output, "\t\t\toutput: %s,\n", generateTest(t))
fmt.Fprintf(output, "\t\t\tcomment: `%s`,\n", comments[i])
fmt.Fprintf(output, "\t\t},\n")
}
fmt.Fprintf(output, "}\n")
}
func generateTest(t test) string {
rv := "[][]byte{"
for _, te := range t {
rv += fmt.Sprintf("%#v,", te)
}
rv += "}"
return rv
}
const fileHeader = `// Generated by running
// maketesttables --url=%s
// DO NOT EDIT
package segment
`
const testHeader = `var unicode%sTests = []struct {
input []byte
output [][]byte
comment string
}{
`
func setupOutput() {
output = bufio.NewWriter(startGofmt())
}
// startGofmt connects output to a gofmt process if -output is set.
func startGofmt() io.Writer {
if *outputFile == "" {
return os.Stdout
}
stdout, err := os.Create(*outputFile)
if err != nil {
log.Fatal(err)
}
// Pipe output to gofmt.
gofmt := exec.Command("gofmt")
fd, err := gofmt.StdinPipe()
if err != nil {
log.Fatal(err)
}
gofmt.Stdout = stdout
gofmt.Stderr = os.Stderr
err = gofmt.Start()
if err != nil {
log.Fatal(err)
}
return fd
}
func flushOutput() {
err := output.Flush()
if err != nil {
log.Fatal(err)
}
}
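For context, the Unicode break-test files this generator downloads mark allowed break positions with "÷" and forbidden ones with "×", with an optional trailing "#" comment; parseLine above splits a line into segments accordingly. A minimal, self-contained sketch of that decomposition (the sample line and its result are illustrative, not taken from a real data file):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative line in the GraphemeBreakTest.txt format:
	// "÷" marks an allowed break, "×" a forbidden one, "#" starts a comment.
	line := "÷ 0061 × 0308 ÷ 0062 ÷\t# sample"

	if i := strings.Index(line, "#"); i >= 0 {
		line = line[:i]
	}
	var segments [][]byte
	for _, piece := range strings.Split(strings.TrimSpace(line), "÷") {
		piece = strings.TrimSpace(piece)
		if piece == "" {
			continue
		}
		var word []rune
		for _, cp := range strings.Split(piece, "×") {
			r, err := strconv.ParseInt(strings.TrimSpace(cp), 16, 64)
			if err != nil {
				panic(err)
			}
			word = append(word, rune(r))
		}
		segments = append(segments, []byte(string(word)))
	}
	fmt.Printf("%q\n", segments) // two segments: "a"+U+0308 and "b"
}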
@@ -32,6 +32,11 @@ type DB struct {
// Need 64-bit alignment.
seq uint64
// Stats. Need 64-bit alignment.
cWriteDelay int64 // The cumulative duration of write delays
cWriteDelayN int32 // The cumulative number of write delays
aliveSnaps, aliveIters int32
// Session.
s *session
@@ -49,9 +54,6 @@ type DB struct {
snapsMu sync.Mutex
snapsList *list.List
// Stats.
aliveSnaps, aliveIters int32
// Write.
batchPool sync.Pool
writeMergeC chan writeMerge
@@ -321,7 +323,7 @@ func recoverTable(s *session, o *opt.Options) error {
}
}
err = iter.Error()
if err != nil {
if err != nil && !errors.IsCorrupted(err) {
return
}
err = tw.Close()
@@ -392,7 +394,7 @@ func recoverTable(s *session, o *opt.Options) error {
}
imax = append(imax[:0], key...)
}
if err := iter.Error(); err != nil {
if err := iter.Error(); err != nil && !errors.IsCorrupted(err) {
iter.Release()
return err
}
@@ -904,6 +906,8 @@ func (db *DB) GetSnapshot() (*Snapshot, error) {
// Returns the number of files at level 'n'.
// leveldb.stats
// Returns statistics of the underlying DB.
// leveldb.writedelay
// Returns cumulative write delay caused by compaction.
// leveldb.sstables
// Returns sstables list for each level.
// leveldb.blockpool
@@ -955,6 +959,9 @@ func (db *DB) GetProperty(name string) (value string, err error) {
level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
float64(read)/1048576.0, float64(write)/1048576.0)
}
case p == "writedelay":
writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay))
value = fmt.Sprintf("DelayN:%d Delay:%s", writeDelayN, writeDelay)
case p == "sstables":
for level, tables := range v.levels {
value += fmt.Sprintf("--- level %d ---\n", level)
......
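The property added above can be read through the existing GetProperty call; a minimal usage sketch, assuming an already opened database (the path below is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example.db", nil) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Cumulative write delay caused by compaction; per the Sprintf above the
	// value has the form "DelayN:<count> Delay:<duration>".
	delay, err := db.GetProperty("leveldb.writedelay")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(delay)
}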
@@ -7,6 +7,7 @@
package leveldb
import (
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb/memdb"
@@ -117,6 +118,8 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
db.writeDelayN++
} else if db.writeDelayN > 0 {
db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN))
atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay))
db.writeDelay = 0
db.writeDelayN = 0
}
......
@@ -88,7 +88,7 @@ type Iterator interface {
// its contents may change on the next call to any 'seeks method'.
Key() []byte
// Value returns the key of the current key/value pair, or nil if done.
// Value returns the value of the current key/value pair, or nil if done.
// The caller should not modify the contents of the returned slice, and
// its contents may change on the next call to any 'seeks method'.
Value() []byte
......
@@ -329,7 +329,7 @@ func (p *DB) Delete(key []byte) error {
h := p.nodeData[node+nHeight]
for i, n := range p.prevNode[:h] {
m := n + 4 + i
m := n + nNext + i
p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i]
}
......
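For context on the corrected line: in goleveldb's memdb each skiplist node is stored as a run of ints in nodeData, and nNext is the offset of the per-level forward pointers, so the change replaces the magic number 4 with the named constant of the same value. A small sketch of that layout, with constant names assumed to mirror the memdb package:

package main

import "fmt"

// Per-node field offsets inside memdb's nodeData slice (assumed layout;
// this is a sketch for context, not the library code itself).
const (
	nKV     = iota // offset into kvData where the node's key/value bytes start
	nKey           // key length
	nVal           // value length
	nHeight        // node height (number of skiplist levels)
	nNext          // start of the forward pointers, one per level
)

func main() {
	// The corrected line swaps the literal 4 for nNext; they are the same value.
	fmt.Println(nNext == 4) // true
}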
@@ -19,7 +19,7 @@ var (
// Releaser is the interface that wraps the basic Release method.
type Releaser interface {
// Release releases associated resources. Release should always success
// and can be called multipe times without causing error.
// and can be called multiple times without causing error.
Release()
}
......
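As a usage note on the contract documented above (Release may be called multiple times without error), a minimal idempotent implementation could look like the following; onceReleaser is a hypothetical type, not part of the package:

package main

import (
	"fmt"
	"sync"
)

// onceReleaser is a hypothetical Releaser whose Release is idempotent,
// matching the "can be called multiple times" contract.
type onceReleaser struct {
	once    sync.Once
	release func()
}

func (r *onceReleaser) Release() { r.once.Do(r.release) }

func main() {
	r := &onceReleaser{release: func() { fmt.Println("released") }}
	r.Release()
	r.Release() // second call is a no-op, not an error
}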
@@ -5,6 +5,8 @@
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
......
@@ -52,10 +52,12 @@ var isSpecialElementMap = map[string]bool{
"iframe": true,
"img": true,
"input": true,
"isindex": true,
"isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility.
"keygen": true,
"li": true,
"link": true,
"listing": true,
"main": true,
"marquee": true,
"menu": true,
"meta": true,
......
@@ -49,18 +49,18 @@ call to Next. For example, to extract an HTML page's anchor text:
for {
tt := z.Next()
switch tt {
case ErrorToken:
case html.ErrorToken:
return z.Err()
case TextToken:
case html.TextToken:
if depth > 0 {
// emitBytes should copy the []byte it receives,
// if it doesn't process it immediately.
emitBytes(z.Text())
}
case StartTagToken, EndTagToken:
case html.StartTagToken, html.EndTagToken:
tn, _ := z.TagName()
if len(tn) == 1 && tn[0] == 'a' {
if tt == StartTagToken {
if tt == html.StartTagToken {
depth++
} else {
depth--
......
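For reference, a self-contained version of the corrected documentation snippet could look like the following; emitBytes and the input string are placeholders:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

// emitBytes stands in for whatever the caller does with the anchor text.
func emitBytes(b []byte) { fmt.Printf("%s\n", b) }

func main() {
	z := html.NewTokenizer(strings.NewReader(`<p><a href="/x">link text</a></p>`))
	depth := 0
	for {
		tt := z.Next()
		switch tt {
		case html.ErrorToken:
			// z.Err() reports io.EOF once the input is exhausted.
			return
		case html.TextToken:
			if depth > 0 {
				// emitBytes should copy the []byte it receives,
				// if it doesn't process it immediately.
				emitBytes(z.Text())
			}
		case html.StartTagToken, html.EndTagToken:
			tn, _ := z.TagName()
			if len(tn) == 1 && tn[0] == 'a' {
				if tt == html.StartTagToken {
					depth++
				} else {
					depth--
				}
			}
		}
	}
}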
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore

// mkpost processes the output of cgo -godefs to
// modify the generated types. It is used to clean up
// the sys API in an architecture specific manner.
//
// mkpost is run after cgo -godefs; see README.md.
package main
import (
"bytes"
"fmt"
"go/format"
"io/ioutil"
"log"
"os"
"regexp"
)
func main() {
// Get the OS and architecture (using GOARCH_TARGET if it exists)
goos := os.Getenv("GOOS")
goarch := os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check that we are using the new build system if we should be.
if goos == "linux" && goarch != "sparc64" {
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
os.Stderr.WriteString("In the new build system, mkpost should not be called directly.\n")
os.Stderr.WriteString("See README.md\n")
os.Exit(1)
}
}
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
// If we have empty Ptrace structs, we should delete them. Only s390x emits
// nonempty Ptrace structs.
ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
b = ptraceRexexp.ReplaceAll(b, nil)
// Replace the control_regs union with a blank identifier for now.
controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
// Remove fields that are added by glibc
// Note that this is unstable as the identifiers are private.
removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
// We refuse to export private fields on s390x
if goarch == "s390x" && goos == "linux" {
// Remove cgo padding fields
removeFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove padding, hidden, or unused fields
removeFieldsRegex = regexp.MustCompile(`X_\S+`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
}
// Remove the first line of warning from cgo
b = b[bytes.IndexByte(b, '\n')+1:]
// Modify the command in the header to include:
// mkpost, our own warning, and a build tag.
replacement := fmt.Sprintf(`$1 | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s,%s`, goarch, goos)
cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
// gofmt
b, err = format.Source(b)
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(b)
}
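To illustrate the kind of rewriting the regular expressions above perform, here is a small standalone sketch of the empty-Ptrace-struct removal; the input fragment is made up and only resembles cgo -godefs output:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Made-up fragment: on most targets the Ptrace helper structs come out empty.
	src := []byte("type PtracePsw struct {\n}\n\ntype Timespec struct {\n\tSec  int64\n\tNsec int64\n}\n")

	// Same pattern as in mkpost.go above: drop empty PtracePsw/Fpregs/Per structs.
	ptraceRegexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
	fmt.Printf("%s", ptraceRegexp.ReplaceAll(src, nil))
}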
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore

/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define __DARWIN_UNIX03 0
#define KERNEL
#define _DARWIN_USE_64_BIT_INODE
#include <dirent.h>
#include <fcntl.h>
#include <signal.h>
#include <termios.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/message.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics; for internal use.
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
type Timeval32 C.struct_timeval32
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat64
type Statfs_t C.struct_statfs64
type Flock_t C.struct_flock
type Fstore_t C.struct_fstore
type Radvisory_t C.struct_radvisory
type Fbootstraptransfer_t C.struct_fbootstraptransfer
type Log2phys_t C.struct_log2phys
type Fsid C.struct_fsid
type Dirent C.struct_dirent
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet4Pktinfo C.struct_in_pktinfo
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
)
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type IfmaMsghdr C.struct_ifma_msghdr
type IfmaMsghdr2 C.struct_ifma_msghdr2
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD