Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 5987 additions and 2175 deletions
......@@ -20,7 +20,7 @@ func Field(r RR, i int) string {
return ""
}
d := reflect.ValueOf(r).Elem().Field(i)
switch k := d.Kind(); k {
switch d.Kind() {
case reflect.String:
return d.String()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
......@@ -31,6 +31,9 @@ func Field(r RR, i int) string {
switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
case `dns:"a"`:
// TODO(miek): Hmm store this as 16 bytes
if d.Len() < net.IPv4len {
return ""
}
if d.Len() < net.IPv6len {
return net.IPv4(byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
......@@ -42,6 +45,9 @@ func Field(r RR, i int) string {
byte(d.Index(14).Uint()),
byte(d.Index(15).Uint())).String()
case `dns:"aaaa"`:
if d.Len() < net.IPv6len {
return ""
}
return net.IP{
byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
......@@ -69,15 +75,6 @@ func Field(r RR, i int) string {
s += " " + Type(d.Index(i).Uint()).String()
}
return s
case `dns:"wks"`:
if d.Len() == 0 {
return ""
}
s := strconv.Itoa(int(d.Index(0).Uint()))
for i := 0; i < d.Len(); i++ {
s += " " + strconv.Itoa(int(d.Index(i).Uint()))
}
return s
default:
// if it does not have a tag it's a string slice
fallthrough
......
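As a quick illustration of the Field accessor above, here is a standalone sketch (assuming the usual github.com/miekg/dns import path); field 0 is the RR header, so rdata fields start at index 1.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
	if err != nil {
		panic(err)
	}
	fmt.Println(dns.Field(rr, 1)) // "10" (the Preference field)
	fmt.Println(dns.Field(rr, 2)) // "mx.miek.nl." (the Mx field)
}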
// +build fuzz
package dns
import "strings"
func Fuzz(data []byte) int {
msg := new(Msg)
if err := msg.Unpack(data); err != nil {
return 0
}
if _, err := msg.Pack(); err != nil {
return 0
}
return 1
}
func FuzzNewRR(data []byte) int {
str := string(data)
// Do not fuzz lines that include the $INCLUDE keyword and hint the fuzzer
// at avoiding them.
// See GH#1025 for context.
if strings.Contains(strings.ToUpper(str), "$INCLUDE") {
return -1
}
if _, err := NewRR(str); err != nil {
return 0
}
return 1
}
package dns
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
// Parse the $GENERATE statement as used in BIND9 zones.
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
// We are called after '$GENERATE '. After which we expect:
// * the range (12-24/2)
// * lhs (ownername)
// * [[ttl][class]]
// * type
// * rhs (rdata)
// But we are lazy here: only the range is parsed; *all* occurrences
// of $ after that are interpreted.
func (zp *ZoneParser) generate(l lex) (RR, bool) {
token := l.token
step := int64(1)
if i := strings.IndexByte(token, '/'); i >= 0 {
if i+1 == len(token) {
return zp.setParseError("bad step in $GENERATE range", l)
}
s, err := strconv.ParseInt(token[i+1:], 10, 64)
if err != nil || s <= 0 {
return zp.setParseError("bad step in $GENERATE range", l)
}
step = s
token = token[:i]
}
sx := strings.SplitN(token, "-", 2)
if len(sx) != 2 {
return zp.setParseError("bad start-stop in $GENERATE range", l)
}
start, err := strconv.ParseInt(sx[0], 10, 64)
if err != nil {
return zp.setParseError("bad start in $GENERATE range", l)
}
end, err := strconv.ParseInt(sx[1], 10, 64)
if err != nil {
return zp.setParseError("bad stop in $GENERATE range", l)
}
if end < 0 || start < 0 || end < start || (end-start)/step > 65535 {
return zp.setParseError("bad range in $GENERATE range", l)
}
// _BLANK
l, ok := zp.c.Next()
if !ok || l.value != zBlank {
return zp.setParseError("garbage after $GENERATE range", l)
}
// Create a complete new string, which we then parse again.
var s string
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
if l.err {
return zp.setParseError("bad data in $GENERATE directive", l)
}
if l.value == zNewline {
break
}
s += l.token
}
r := &generateReader{
s: s,
cur: start,
start: start,
end: end,
step: step,
file: zp.file,
lex: &l,
}
zp.sub = NewZoneParser(r, zp.origin, zp.file)
zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
zp.sub.generateDisallowed = true
zp.sub.SetDefaultTTL(defaultTtl)
return zp.subNext()
}
type generateReader struct {
s string
si int
cur int64
start int64
end int64
step int64
mod bytes.Buffer
escape bool
eof bool
file string
lex *lex
}
func (r *generateReader) parseError(msg string, end int) *ParseError {
r.eof = true // Make errors sticky.
l := *r.lex
l.token = r.s[r.si-1 : end]
l.column += r.si // l.column starts one zBLANK before r.s
return &ParseError{r.file, msg, l}
}
func (r *generateReader) Read(p []byte) (int, error) {
// NewZLexer, through NewZoneParser, should use ReadByte and
// not end up here.
panic("not implemented")
}
func (r *generateReader) ReadByte() (byte, error) {
if r.eof {
return 0, io.EOF
}
if r.mod.Len() > 0 {
return r.mod.ReadByte()
}
if r.si >= len(r.s) {
r.si = 0
r.cur += r.step
r.eof = r.cur > r.end || r.cur < 0
return '\n', nil
}
si := r.si
r.si++
switch r.s[si] {
case '\\':
if r.escape {
r.escape = false
return '\\', nil
}
r.escape = true
return r.ReadByte()
case '$':
if r.escape {
r.escape = false
return '$', nil
}
mod := "%d"
if si >= len(r.s)-1 {
// End of the string
fmt.Fprintf(&r.mod, mod, r.cur)
return r.mod.ReadByte()
}
if r.s[si+1] == '$' {
r.si++
return '$', nil
}
var offset int64
// Search for { and }
if r.s[si+1] == '{' {
// Modifier block
sep := strings.Index(r.s[si+2:], "}")
if sep < 0 {
return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
}
var errMsg string
mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
if errMsg != "" {
return 0, r.parseError(errMsg, si+3+sep)
}
if r.start+offset < 0 || r.end+offset > 1<<31-1 {
return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
}
r.si += 2 + sep // Jump to it
}
fmt.Fprintf(&r.mod, mod, r.cur+offset)
return r.mod.ReadByte()
default:
if r.escape { // Pretty useless here
r.escape = false
return r.ReadByte()
}
return r.s[si], nil
}
}
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
func modToPrintf(s string) (string, int64, string) {
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
// values for optional width and type, if necessary.
var offStr, widthStr, base string
switch xs := strings.Split(s, ","); len(xs) {
case 1:
offStr, widthStr, base = xs[0], "0", "d"
case 2:
offStr, widthStr, base = xs[0], xs[1], "d"
case 3:
offStr, widthStr, base = xs[0], xs[1], xs[2]
default:
return "", 0, "bad modifier in $GENERATE"
}
switch base {
case "o", "d", "x", "X":
default:
return "", 0, "bad base in $GENERATE"
}
offset, err := strconv.ParseInt(offStr, 10, 64)
if err != nil {
return "", 0, "bad offset in $GENERATE"
}
width, err := strconv.ParseInt(widthStr, 10, 64)
if err != nil || width < 0 || width > 255 {
return "", 0, "bad width in $GENERATE"
}
if width == 0 {
return "%" + base, offset, ""
}
return "%0" + widthStr + base, offset, ""
}
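To see the $GENERATE machinery above end to end, here is a hedged sketch (again assuming the usual github.com/miekg/dns import) that feeds a directive with an {offset,width,base} modifier through the public ZoneParser; the expansion noted in the comment is what the 1-4 range and the %03d modifier should produce.

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := "$GENERATE 1-4 host-${0,3,d} IN A 192.0.2.$\n"
	zp := dns.NewZoneParser(strings.NewReader(zone), "example.org.", "")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		// Expected: host-001.example.org. ... A 192.0.2.1 through host-004 / 192.0.2.4
		fmt.Println(rr)
	}
	if err := zp.Err(); err != nil {
		fmt.Println(err)
	}
}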
package dns
import (
"bytes"
"crypto"
"hash"
)
// identityHash will not hash; it only buffers the data written into it and returns it as-is.
type identityHash struct {
b *bytes.Buffer
}
// Implement the hash.Hash interface.
func (i identityHash) Write(b []byte) (int, error) { return i.b.Write(b) }
func (i identityHash) Size() int { return i.b.Len() }
func (i identityHash) BlockSize() int { return 1024 }
func (i identityHash) Reset() { i.b.Reset() }
func (i identityHash) Sum(b []byte) []byte { return append(b, i.b.Bytes()...) }
func hashFromAlgorithm(alg uint8) (hash.Hash, crypto.Hash, error) {
hashnumber, ok := AlgorithmToHash[alg]
if !ok {
return nil, 0, ErrAlg
}
if hashnumber == 0 {
return identityHash{b: &bytes.Buffer{}}, hashnumber, nil
}
return hashnumber.New(), hashnumber, nil
}
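A concrete case (assuming the DNSSEC algorithm tables elsewhere in the package): AlgorithmToHash maps ED25519 to crypto.Hash(0), so hashFromAlgorithm hands back an identityHash and the signer receives the raw, buffered signing data instead of a digest, which is what PureEdDSA expects.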
......@@ -4,17 +4,19 @@ package dns
// SplitDomainName splits a name string into its labels.
// www.miek.nl. returns []string{"www", "miek", "nl"}
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
// The root label (.) returns nil. Note that using
// strings.Split(s) will work in most cases, but does not handle
// escaped dots (\.) for instance.
// s must be a syntactically valid domain name, see IsDomainName.
func SplitDomainName(s string) (labels []string) {
if len(s) == 0 {
if s == "" {
return nil
}
fqdnEnd := 0 // offset of the final '.' or the length of the name
idx := Split(s)
begin := 0
if s[len(s)-1] == '.' {
if IsFqdn(s) {
fqdnEnd = len(s) - 1
} else {
fqdnEnd = len(s)
......@@ -26,43 +28,40 @@ func SplitDomainName(s string) (labels []string) {
case 1:
// no-op
default:
end := 0
for i := 1; i < len(idx); i++ {
end = idx[i]
for _, end := range idx[1:] {
labels = append(labels, s[begin:end-1])
begin = end
}
}
labels = append(labels, s[begin:fqdnEnd])
return labels
return append(labels, s[begin:fqdnEnd])
}
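A short fragment (assuming import "github.com/miekg/dns" and "fmt") showing the behaviour described in the doc comment, including an escaped dot staying inside its label:

fmt.Println(dns.SplitDomainName("www.miek.nl."))            // [www miek nl]
fmt.Println(dns.SplitDomainName(`mail\.box.example.org.`))  // [mail\.box example org], the escaped dot is kept
fmt.Println(dns.SplitDomainName(".") == nil)                // true, the root label yields nil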
// CompareDomainName compares the names s1 and s2 and
// returns how many labels they have in common starting from the *right*.
// The comparison stops at the first inequality. The names are not downcased
// The comparison stops at the first inequality. The names are downcased
// before the comparison.
//
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
// www.miek.nl. and www.bla.nl. have one label in common: nl
//
// s1 and s2 must be syntactically valid domain names.
func CompareDomainName(s1, s2 string) (n int) {
s1 = Fqdn(s1)
s2 = Fqdn(s2)
l1 := Split(s1)
l2 := Split(s2)
// the first check: root label
if l1 == nil || l2 == nil {
return
if s1 == "." || s2 == "." {
return 0
}
l1 := Split(s1)
l2 := Split(s2)
j1 := len(l1) - 1 // end
i1 := len(l1) - 2 // start
j2 := len(l2) - 1
i2 := len(l2) - 2
// the second check can be done here: last/only label
// before we fall through into the for-loop below
if s1[l1[j1]:] == s2[l2[j2]:] {
if equal(s1[l1[j1]:], s2[l2[j2]:]) {
n++
} else {
return
......@@ -71,7 +70,7 @@ func CompareDomainName(s1, s2 string) (n int) {
if i1 < 0 || i2 < 0 {
break
}
if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] {
if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
n++
} else {
break
......@@ -84,7 +83,8 @@ func CompareDomainName(s1, s2 string) (n int) {
return
}
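The examples from the doc comment, as a fragment (assuming import "github.com/miekg/dns" and "fmt"):

fmt.Println(dns.CompareDomainName("www.miek.nl.", "miek.nl."))    // 2 (miek, nl)
fmt.Println(dns.CompareDomainName("www.miek.nl.", "www.bla.nl.")) // 1 (nl)
fmt.Println(dns.CompareDomainName("www.miek.nl.", "."))           // 0, the root label short-circuits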
// CountLabel counts the the number of labels in the string s.
// CountLabel counts the number of labels in the string s.
// s must be a syntactically valid domain name.
func CountLabel(s string) (labels int) {
if s == "." {
return
......@@ -103,6 +103,7 @@ func CountLabel(s string) (labels int) {
// Split splits a name s into its label indexes.
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
// The root name (.) returns nil. Also see SplitDomainName.
// s must be a syntactically valid domain name.
func Split(s string) []int {
if s == "." {
return nil
......@@ -125,20 +126,23 @@ func Split(s string) []int {
// The bool end is true when the end of the string has been reached.
// Also see PrevLabel.
func NextLabel(s string, offset int) (i int, end bool) {
quote := false
if s == "" {
return 0, true
}
for i = offset; i < len(s)-1; i++ {
switch s[i] {
case '\\':
quote = !quote
default:
quote = false
case '.':
if quote {
quote = !quote
continue
}
return i + 1, false
if s[i] != '.' {
continue
}
j := i - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-i)%2 == 0 {
continue
}
return i + 1, false
}
return i + 1, true
}
......@@ -148,15 +152,61 @@ func NextLabel(s string, offset int) (i int, end bool) {
// The bool start is true when the start of the string has been overshot.
// Also see NextLabel.
func PrevLabel(s string, n int) (i int, start bool) {
if s == "" {
return 0, true
}
if n == 0 {
return len(s), false
}
lab := Split(s)
if lab == nil {
return 0, true
l := len(s) - 1
if s[l] == '.' {
l--
}
if n > len(lab) {
return 0, true
for ; l >= 0 && n > 0; l-- {
if s[l] != '.' {
continue
}
j := l - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-l)%2 == 0 {
continue
}
n--
if n == 0 {
return l + 1, false
}
}
return 0, n > 1
}
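A fragment (assuming import "github.com/miekg/dns" and "fmt") tying Split, NextLabel and PrevLabel together on the same name; the offsets in the comments follow the []int{0, 4, 9} example above:

fmt.Println(dns.Split("www.miek.nl."))        // [0 4 9]
fmt.Println(dns.NextLabel("www.miek.nl.", 0)) // 4 false ("miek" starts at offset 4)
fmt.Println(dns.PrevLabel("www.miek.nl.", 1)) // 9 false ("nl" is the first label from the right)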
// equal compares a and b while ignoring case. It returns true when equal otherwise false.
func equal(a, b string) bool {
// might be lifted into API function.
la := len(a)
lb := len(b)
if la != lb {
return false
}
for i := la - 1; i >= 0; i-- {
ai := a[i]
bi := b[i]
if ai >= 'A' && ai <= 'Z' {
ai |= 'a' - 'A'
}
if bi >= 'A' && bi <= 'Z' {
bi |= 'a' - 'A'
}
if ai != bi {
return false
}
}
return lab[len(lab)-n], false
return true
}
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package dns
import "net"
const supportsReusePort = false
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.Listen(network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.ListenPacket(network, addr)
}
// +build go1.11
// +build aix darwin dragonfly freebsd linux netbsd openbsd
package dns
import (
"context"
"net"
"syscall"
"golang.org/x/sys/unix"
)
const supportsReusePort = true
func reuseportControl(network, address string, c syscall.RawConn) error {
var opErr error
err := c.Control(func(fd uintptr) {
opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
})
if err != nil {
return err
}
return opErr
}
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.Listen(context.Background(), network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.ListenPacket(context.Background(), network, addr)
}
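These helpers back the server's SO_REUSEPORT support. A hedged sketch of how a caller would opt in, assuming this build of the package exposes the Server.ReusePort field that was added alongside these listeners:

srv := &dns.Server{Addr: "127.0.0.1:8053", Net: "udp", ReusePort: true}
go srv.ListenAndServe()
// A second Server with the same Addr and ReusePort set can now bind concurrently
// on the platforms listed in the build tags above; elsewhere the flag is a no-op.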
......@@ -8,61 +8,87 @@
package dns
//go:generate go run msg_generate.go
import (
"encoding/base32"
"encoding/base64"
"encoding/hex"
"crypto/rand"
"encoding/binary"
"fmt"
"math/big"
"math/rand"
"net"
"reflect"
"strconv"
"time"
"strings"
)
const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
const (
maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4
// This is the maximum number of compression pointers that should occur in a
// semantically valid message. Each label in a domain name must be at least one
// octet and is separated by a period. The root label won't be represented by a
// compression pointer to a compression pointer, hence the -2 to exclude the
// smallest valid root label.
//
// It is possible to construct a valid message that has more compression pointers
// than this, and still doesn't loop, by pointing to a previous pointer. This is
// not something a well written implementation should ever do, so we leave them
// to trip the maximum compression pointer check.
maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2
// This is the maximum length of a domain name in presentation format. The
// maximum wire length of a domain name is 255 octets (see above), with the
// maximum label length being 63. The wire format requires one extra byte over
// the presentation format, reducing the number of octets by 1. Each label in
// the name will be separated by a single period, with each octet in the label
// expanding to at most 4 bytes (\DDD). If all other labels are of the maximum
// length, then the final label can only be 61 octets long to not exceed the
// maximum allowed wire length.
maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1
)
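Plugging in the numbers: maxCompressionPointers = (255+1)/2 - 2 = 126, and maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1 = 1004 bytes.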
// Errors defined in this package.
var (
// ErrAlg indicates an error with the (DNSSEC) algorithm.
ErrAlg error = &Error{err: "bad algorithm"}
// ErrAuth indicates an error in the TSIG authentication.
ErrAuth error = &Error{err: "bad authentication"}
// ErrBuf indicates that the buffer used it too small for the message.
ErrBuf error = &Error{err: "buffer size too small"}
// ErrConnEmpty indicates a connection is being uses before it is initialized.
ErrConnEmpty error = &Error{err: "conn has no connection"}
// ErrExtendedRcode ...
ErrExtendedRcode error = &Error{err: "bad extended rcode"}
// ErrFqdn indicates that a domain name does not have a closing dot.
ErrFqdn error = &Error{err: "domain must be fully qualified"}
// ErrId indicates there is a mismatch with the message's ID.
ErrId error = &Error{err: "id mismatch"}
// ErrKeyAlg indicates that the algorithm in the key is not valid.
ErrKeyAlg error = &Error{err: "bad key algorithm"}
ErrKey error = &Error{err: "bad key"}
ErrKeySize error = &Error{err: "bad key size"}
ErrNoSig error = &Error{err: "no signature found"}
ErrPrivKey error = &Error{err: "bad private key"}
ErrRcode error = &Error{err: "bad rcode"}
ErrRdata error = &Error{err: "bad rdata"}
ErrRRset error = &Error{err: "bad rrset"}
ErrSecret error = &Error{err: "no secrets defined"}
ErrShortRead error = &Error{err: "short read"}
// ErrSig indicates that a signature can not be cryptographically validated.
ErrSig error = &Error{err: "bad signature"}
// ErrSOA indicates that no SOA RR was seen when doing zone transfers.
ErrSoa error = &Error{err: "no SOA"}
// ErrTime indicates a timing error in TSIG authentication.
ErrTime error = &Error{err: "bad time"}
ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm.
ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication.
ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message.
ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized.
ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ...
ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot.
ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID.
ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid.
ErrKey error = &Error{err: "bad key"}
ErrKeySize error = &Error{err: "bad key size"}
ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)}
ErrNoSig error = &Error{err: "no signature found"}
ErrPrivKey error = &Error{err: "bad private key"}
ErrRcode error = &Error{err: "bad rcode"}
ErrRdata error = &Error{err: "bad rdata"}
ErrRRset error = &Error{err: "bad rrset"}
ErrSecret error = &Error{err: "no secrets defined"}
ErrShortRead error = &Error{err: "short read"}
ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated.
ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers.
ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication.
)
// Id, by default, returns a 16 bits random number to be used as a
// message id. The random provided should be good enough. This being a
// variable the function can be reassigned to a custom function.
// For instance, to make it return a static value:
// Id by default returns a 16-bit random number to be used as a message id. The
// number is drawn from a cryptographically secure random number generator.
// This being a variable the function can be reassigned to a custom function.
// For instance, to make it return a static value for testing:
//
// dns.Id = func() uint16 { return 3 }
var Id func() uint16 = id
var Id = id
// id returns a 16-bit random number to be used as a
// message id. The randomness provided should be good enough.
func id() uint16 {
var output uint16
err := binary.Read(rand.Reader, binary.BigEndian, &output)
if err != nil {
panic("dns: reading random id failed: " + err.Error())
}
return output
}
// MsgHdr is a manually-unpacked version of (id, bits).
type MsgHdr struct {
......@@ -82,103 +108,13 @@ type MsgHdr struct {
// Msg contains the layout of a DNS message.
type Msg struct {
MsgHdr
Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. This not part of the official DNS packet format.
Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format.
Question []Question // Holds the RR(s) of the question section.
Answer []RR // Holds the RR(s) of the answer section.
Ns []RR // Holds the RR(s) of the authority section.
Extra []RR // Holds the RR(s) of the additional section.
}
// TypeToString is a map of strings for each RR wire type.
var TypeToString = map[uint16]string{
TypeA: "A",
TypeAAAA: "AAAA",
TypeAFSDB: "AFSDB",
TypeANY: "ANY", // Meta RR
TypeATMA: "ATMA",
TypeAXFR: "AXFR", // Meta RR
TypeCAA: "CAA",
TypeCDNSKEY: "CDNSKEY",
TypeCDS: "CDS",
TypeCERT: "CERT",
TypeCNAME: "CNAME",
TypeDHCID: "DHCID",
TypeDLV: "DLV",
TypeDNAME: "DNAME",
TypeDNSKEY: "DNSKEY",
TypeDS: "DS",
TypeEID: "EID",
TypeEUI48: "EUI48",
TypeEUI64: "EUI64",
TypeGID: "GID",
TypeGPOS: "GPOS",
TypeHINFO: "HINFO",
TypeHIP: "HIP",
TypeIPSECKEY: "IPSECKEY",
TypeISDN: "ISDN",
TypeIXFR: "IXFR", // Meta RR
TypeKEY: "KEY",
TypeKX: "KX",
TypeL32: "L32",
TypeL64: "L64",
TypeLOC: "LOC",
TypeLP: "LP",
TypeMB: "MB",
TypeMD: "MD",
TypeMF: "MF",
TypeMG: "MG",
TypeMINFO: "MINFO",
TypeMR: "MR",
TypeMX: "MX",
TypeNAPTR: "NAPTR",
TypeNID: "NID",
TypeNINFO: "NINFO",
TypeNIMLOC: "NIMLOC",
TypeNS: "NS",
TypeNSAPPTR: "NSAP-PTR",
TypeNSEC3: "NSEC3",
TypeNSEC3PARAM: "NSEC3PARAM",
TypeNSEC: "NSEC",
TypeNULL: "NULL",
TypeOPT: "OPT",
TypeOPENPGPKEY: "OPENPGPKEY",
TypePTR: "PTR",
TypeRKEY: "RKEY",
TypeRP: "RP",
TypeRRSIG: "RRSIG",
TypeRT: "RT",
TypeSIG: "SIG",
TypeSOA: "SOA",
TypeSPF: "SPF",
TypeSRV: "SRV",
TypeSSHFP: "SSHFP",
TypeTA: "TA",
TypeTALINK: "TALINK",
TypeTKEY: "TKEY", // Meta RR
TypeTLSA: "TLSA",
TypeTSIG: "TSIG", // Meta RR
TypeTXT: "TXT",
TypePX: "PX",
TypeUID: "UID",
TypeUINFO: "UINFO",
TypeUNSPEC: "UNSPEC",
TypeURI: "URI",
TypeWKS: "WKS",
TypeX25: "X25",
}
// StringToType is the reverse of TypeToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
// Map of opcodes strings.
var StringToOpcode = reverseInt(OpcodeToString)
// Map of rcodes strings.
var StringToRcode = reverseInt(RcodeToString)
// ClassToString maps Classes to strings for each CLASS wire type.
var ClassToString = map[uint16]string{
ClassINET: "IN",
......@@ -204,28 +140,56 @@ var RcodeToString = map[int]string{
RcodeFormatError: "FORMERR",
RcodeServerFailure: "SERVFAIL",
RcodeNameError: "NXDOMAIN",
RcodeNotImplemented: "NOTIMPL",
RcodeNotImplemented: "NOTIMP",
RcodeRefused: "REFUSED",
RcodeYXDomain: "YXDOMAIN", // From RFC 2136
RcodeYXDomain: "YXDOMAIN", // See RFC 2136
RcodeYXRrset: "YXRRSET",
RcodeNXRrset: "NXRRSET",
RcodeNotAuth: "NOTAUTH",
RcodeNotZone: "NOTZONE",
RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891
// RcodeBadVers: "BADVERS",
RcodeBadKey: "BADKEY",
RcodeBadTime: "BADTIME",
RcodeBadMode: "BADMODE",
RcodeBadName: "BADNAME",
RcodeBadAlg: "BADALG",
RcodeBadTrunc: "BADTRUNC",
RcodeBadKey: "BADKEY",
RcodeBadTime: "BADTIME",
RcodeBadMode: "BADMODE",
RcodeBadName: "BADNAME",
RcodeBadAlg: "BADALG",
RcodeBadTrunc: "BADTRUNC",
RcodeBadCookie: "BADCOOKIE",
}
// Rather than write the usual handful of routines to pack and
// unpack every message that can appear on the wire, we use
// reflection to write a generic pack/unpack for structs and then
// use it. Thus, if in the future we need to define new message
// structs, no new pack/unpack/printing code needs to be written.
// compressionMap is used to allow a more efficient compression map
// to be used for internal packDomainName calls without changing the
// signature or functionality of public API.
//
// In particular, map[string]uint16 uses 25% less per-entry memory
// than does map[string]int.
type compressionMap struct {
ext map[string]int // external callers
int map[string]uint16 // internal callers
}
func (m compressionMap) valid() bool {
return m.int != nil || m.ext != nil
}
func (m compressionMap) insert(s string, pos int) {
if m.ext != nil {
m.ext[s] = pos
} else {
m.int[s] = uint16(pos)
}
}
func (m compressionMap) find(s string) (int, bool) {
if m.ext != nil {
pos, ok := m.ext[s]
return pos, ok
}
pos, ok := m.int[s]
return int(pos), ok
}
// Domain names are a sequence of counted strings
// split at the dots. They end with a zero-length string.
......@@ -235,147 +199,161 @@ var RcodeToString = map[int]string{
// map needs to hold a mapping between domain names and offsets
// pointing into msg.
func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
off1, _, err = packDomainName(s, msg, off, compression, compress)
return
return packDomainName(s, msg, off, compressionMap{ext: compression}, compress)
}
func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) {
// special case if msg == nil
lenmsg := 256
if msg != nil {
lenmsg = len(msg)
}
func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
// XXX: A logical copy of this function exists in IsDomainName and
// should be kept in sync with this function.
ls := len(s)
if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
return off, 0, nil
}
// If not fully qualified, error out, but only if msg == nil #ugly
switch {
case msg == nil:
if s[ls-1] != '.' {
s += "."
ls++
}
case msg != nil:
if s[ls-1] != '.' {
return lenmsg, 0, ErrFqdn
}
return off, nil
}
// If not fully qualified, error out.
if !IsFqdn(s) {
return len(msg), ErrFqdn
}
// Each dot ends a segment of the name.
// We trade each dot byte for a length byte.
// Except for escaped dots (\.), which are normal dots.
// There is also a trailing zero.
// Compression
nameoffset := -1
pointer := -1
// Emit sequence of counted strings, chopping at dots.
begin := 0
bs := []byte(s)
roBs, bsFresh, escapedDot := s, true, false
var (
begin int
compBegin int
compOff int
bs []byte
wasDot bool
)
loop:
for i := 0; i < ls; i++ {
if bs[i] == '\\' {
for j := i; j < ls-1; j++ {
bs[j] = bs[j+1]
var c byte
if bs == nil {
c = s[i]
} else {
c = bs[i]
}
switch c {
case '\\':
if off+1 > len(msg) {
return len(msg), ErrBuf
}
ls--
if off+1 > lenmsg {
return lenmsg, labels, ErrBuf
if bs == nil {
bs = []byte(s)
}
// check for \DDD
if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
bs[i] = dddToByte(bs[i:])
for j := i + 1; j < ls-2; j++ {
bs[j] = bs[j+2]
}
ls -= 2
} else if bs[i] == 't' {
bs[i] = '\t'
} else if bs[i] == 'r' {
bs[i] = '\r'
} else if bs[i] == 'n' {
bs[i] = '\n'
if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) {
bs[i] = dddToByte(bs[i+1:])
copy(bs[i+1:ls-3], bs[i+4:])
ls -= 3
compOff += 3
} else {
copy(bs[i:ls-1], bs[i+1:])
ls--
compOff++
}
escapedDot = bs[i] == '.'
bsFresh = false
continue
}
if bs[i] == '.' {
if i > 0 && bs[i-1] == '.' && !escapedDot {
wasDot = false
case '.':
if i == 0 && len(s) > 1 {
// leading dots are not legal except for the root zone
return len(msg), ErrRdata
}
if wasDot {
// two dots back to back is not legal
return lenmsg, labels, ErrRdata
return len(msg), ErrRdata
}
if i-begin >= 1<<6 { // top two bits of length must be clear
return lenmsg, labels, ErrRdata
wasDot = true
labelLen := i - begin
if labelLen >= 1<<6 { // top two bits of length must be clear
return len(msg), ErrRdata
}
// off can already (we're in a loop) be bigger than len(msg)
// this happens when a name isn't fully qualified
if off+1 > lenmsg {
return lenmsg, labels, ErrBuf
if off+1+labelLen > len(msg) {
return len(msg), ErrBuf
}
if msg != nil {
msg[off] = byte(i - begin)
}
offset := off
off++
for j := begin; j < i; j++ {
if off+1 > lenmsg {
return lenmsg, labels, ErrBuf
}
if msg != nil {
msg[off] = bs[j]
}
off++
}
if compress && !bsFresh {
roBs = string(bs)
bsFresh = true
}
// Dont try to compress '.'
if compress && roBs[begin:] != "." {
if p, ok := compression[roBs[begin:]]; !ok {
// Only offsets smaller than this can be used.
if offset < maxCompressionOffset {
compression[roBs[begin:]] = offset
}
} else {
// Don't try to compress '.'
// We should only compress when compress is true, but we should also still pick
// up names that can be used for *future* compression(s).
if compression.valid() && !isRootLabel(s, bs, begin, ls) {
if p, ok := compression.find(s[compBegin:]); ok {
// The first hit is the longest matching dname
// keep the pointer offset we get back and store
// the offset of the current name, because that's
// where we need to insert the pointer later
// If compress is true, we're allowed to compress this dname
if pointer == -1 && compress {
pointer = p // Where to point to
nameoffset = offset // Where to point from
break
if compress {
pointer = p // Where to point to
break loop
}
} else if off < maxCompressionOffset {
// Only offsets smaller than maxCompressionOffset can be used.
compression.insert(s[compBegin:], off)
}
}
labels++
// The following is covered by the length check above.
msg[off] = byte(labelLen)
if bs == nil {
copy(msg[off+1:], s[begin:i])
} else {
copy(msg[off+1:], bs[begin:i])
}
off += 1 + labelLen
begin = i + 1
compBegin = begin + compOff
default:
wasDot = false
}
escapedDot = false
}
// Root label is special
if len(bs) == 1 && bs[0] == '.' {
return off, labels, nil
if isRootLabel(s, bs, 0, ls) {
return off, nil
}
// If we did compression and we find something add the pointer here
if pointer != -1 {
// We have two bytes (14 bits) to put the pointer in
// if msg == nil, we will never do compression
msg[nameoffset], msg[nameoffset+1] = packUint16(uint16(pointer ^ 0xC000))
off = nameoffset + 1
goto End
binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000))
return off + 2, nil
}
if msg != nil {
if off < len(msg) {
msg[off] = 0
}
End:
off++
return off, labels, nil
return off + 1, nil
}
// isRootLabel returns whether s or bs, from off to end, is the root
// label ".".
//
// If bs is nil, s will be checked, otherwise bs will be checked.
func isRootLabel(s string, bs []byte, off, end int) bool {
if bs == nil {
return s[off:end] == "."
}
return end-off == 1 && bs[off] == '.'
}
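A fragment (assuming import "github.com/miekg/dns" and "fmt") showing the counted-string wire encoding produced by the exported wrapper:

buf := make([]byte, 64)
off, err := dns.PackDomainName("www.miek.nl.", buf, 0, nil, false)
if err != nil {
	panic(err)
}
fmt.Printf("% x\n", buf[:off]) // 03 77 77 77 04 6d 69 65 6b 02 6e 6c 00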
// Unpack a domain name.
......@@ -392,11 +370,16 @@ End:
// In theory, the pointers are only allowed to jump backward.
// We let them jump anywhere and stop jumping after a while.
// UnpackDomainName unpacks a domain name into a string.
// UnpackDomainName unpacks a domain name into a string. It returns
// the name, the new offset into msg and any error that occurred.
//
// When an error is encountered, the unpacked name will be discarded
// and len(msg) will be returned as the offset.
func UnpackDomainName(msg []byte, off int) (string, int, error) {
s := make([]byte, 0, 64)
s := make([]byte, 0, maxDomainNamePresentationLength)
off1 := 0
lenmsg := len(msg)
budget := maxDomainNameWireOctets
ptr := 0 // number of pointers followed
Loop:
for {
......@@ -415,30 +398,17 @@ Loop:
if off+c > lenmsg {
return "", lenmsg, ErrBuf
}
for j := off; j < off+c; j++ {
switch b := msg[j]; b {
case '.', '(', ')', ';', ' ', '@':
fallthrough
case '"', '\\':
budget -= c + 1 // +1 for the label separator
if budget <= 0 {
return "", lenmsg, ErrLongDomain
}
for _, b := range msg[off : off+c] {
if isDomainNameLabelSpecial(b) {
s = append(s, '\\', b)
case '\t':
s = append(s, '\\', 't')
case '\r':
s = append(s, '\\', 'r')
default:
if b < 32 || b >= 127 { // unprintable use \DDD
var buf [3]byte
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
s = append(s, '\\')
for i := 0; i < 3-len(bufs); i++ {
s = append(s, '0')
}
for _, r := range bufs {
s = append(s, r)
}
} else {
s = append(s, b)
}
} else if b < ' ' || b > '~' {
s = append(s, escapeByte(b)...)
} else {
s = append(s, b)
}
}
s = append(s, '.')
......@@ -457,9 +427,12 @@ Loop:
if ptr == 0 {
off1 = off
}
if ptr++; ptr > 10 {
if ptr++; ptr > maxCompressionPointers {
return "", lenmsg, &Error{err: "too many compression pointers"}
}
// The pointer should guarantee that it advances and points forwards, but
// the check above at least guarantees that following pointers cannot loop.
off = (c^0xC0)<<8 | int(c1)
default:
// 0x80 and 0x40 are reserved
......@@ -470,13 +443,12 @@ Loop:
off1 = off
}
if len(s) == 0 {
s = []byte(".")
return ".", off1, nil
}
return string(s), off1, nil
}
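A fragment (assuming import "github.com/miekg/dns" and "fmt") that unpacks a name reached through a compression pointer, the case the pointer budget above guards against:

msg := []byte{
	3, 'w', 'w', 'w', 4, 'm', 'i', 'e', 'k', 2, 'n', 'l', 0, // www.miek.nl. at offset 0
	0xC0, 0x00, // compression pointer back to offset 0
}
name, off, err := dns.UnpackDomainName(msg, 13)
fmt.Println(name, off, err) // www.miek.nl. 15 <nil>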
func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
var err error
if len(txt) == 0 {
if offset >= len(msg) {
return offset, ErrBuf
......@@ -484,21 +456,22 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
msg[offset] = 0
return offset, nil
}
for i := range txt {
if len(txt[i]) > len(tmp) {
var err error
for _, s := range txt {
if len(s) > len(tmp) {
return offset, ErrBuf
}
offset, err = packTxtString(txt[i], msg, offset, tmp)
offset, err = packTxtString(s, msg, offset, tmp)
if err != nil {
return offset, err
}
}
return offset, err
return offset, nil
}
func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
lenByteOffset := offset
if offset >= len(msg) {
if offset >= len(msg) || len(s) > len(tmp) {
return offset, ErrBuf
}
offset++
......@@ -517,12 +490,6 @@ func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
msg[offset] = dddToByte(bs[i:])
i += 2
} else if bs[i] == 't' {
msg[offset] = '\t'
} else if bs[i] == 'r' {
msg[offset] = '\r'
} else if bs[i] == 'n' {
msg[offset] = '\n'
} else {
msg[offset] = bs[i]
}
......@@ -540,7 +507,7 @@ func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
}
func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
if offset >= len(msg) {
if offset >= len(msg) || len(s) > len(tmp) {
return offset, ErrBuf
}
bs := tmp[:len(s)]
......@@ -573,7 +540,7 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
off = off0
var s string
for off < len(msg) && err == nil {
s, off, err = unpackTxtString(msg, off)
s, off, err = unpackString(msg, off)
if err == nil {
ss = append(ss, s)
}
......@@ -581,814 +548,17 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
return
}
func unpackTxtString(msg []byte, offset int) (string, int, error) {
if offset+1 > len(msg) {
return "", offset, &Error{err: "overflow unpacking txt"}
}
l := int(msg[offset])
if offset+l+1 > len(msg) {
return "", offset, &Error{err: "overflow unpacking txt"}
}
s := make([]byte, 0, l)
for _, b := range msg[offset+1 : offset+1+l] {
switch b {
case '"', '\\':
s = append(s, '\\', b)
case '\t':
s = append(s, `\t`...)
case '\r':
s = append(s, `\r`...)
case '\n':
s = append(s, `\n`...)
default:
if b < 32 || b > 127 { // unprintable
var buf [3]byte
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
s = append(s, '\\')
for i := 0; i < 3-len(bufs); i++ {
s = append(s, '0')
}
for _, r := range bufs {
s = append(s, r)
}
} else {
s = append(s, b)
}
}
}
offset += 1 + l
return string(s), offset, nil
}
// Pack a reflect.StructValue into msg. Struct members can only be uint8, uint16, uint32, string,
// slices and other (often anonymous) structs.
func packStructValue(val reflect.Value, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
var txtTmp []byte
lenmsg := len(msg)
numfield := val.NumField()
for i := 0; i < numfield; i++ {
typefield := val.Type().Field(i)
if typefield.Tag == `dns:"-"` {
continue
}
switch fv := val.Field(i); fv.Kind() {
default:
return lenmsg, &Error{err: "bad kind packing"}
case reflect.Interface:
// PrivateRR is the only RR implementation that has interface field.
// therefore it's expected that this interface would be PrivateRdata
switch data := fv.Interface().(type) {
case PrivateRdata:
n, err := data.Pack(msg[off:])
if err != nil {
return lenmsg, err
}
off += n
default:
return lenmsg, &Error{err: "bad kind interface packing"}
}
case reflect.Slice:
switch typefield.Tag {
default:
return lenmsg, &Error{"bad tag packing slice: " + typefield.Tag.Get("dns")}
case `dns:"domain-name"`:
for j := 0; j < val.Field(i).Len(); j++ {
element := val.Field(i).Index(j).String()
off, err = PackDomainName(element, msg, off, compression, false && compress)
if err != nil {
return lenmsg, err
}
}
case `dns:"txt"`:
if txtTmp == nil {
txtTmp = make([]byte, 256*4+1)
}
off, err = packTxt(fv.Interface().([]string), msg, off, txtTmp)
if err != nil {
return lenmsg, err
}
case `dns:"opt"`: // edns
for j := 0; j < val.Field(i).Len(); j++ {
element := val.Field(i).Index(j).Interface()
b, e := element.(EDNS0).pack()
if e != nil {
return lenmsg, &Error{err: "overflow packing opt"}
}
// Option code
msg[off], msg[off+1] = packUint16(element.(EDNS0).Option())
// Length
msg[off+2], msg[off+3] = packUint16(uint16(len(b)))
off += 4
if off+len(b) > lenmsg {
copy(msg[off:], b)
off = lenmsg
continue
}
// Actual data
copy(msg[off:off+len(b)], b)
off += len(b)
}
case `dns:"a"`:
if val.Type().String() == "dns.IPSECKEY" {
// Field(2) is GatewayType, must be 1
if val.Field(2).Uint() != 1 {
continue
}
}
// It must be a slice of 4, even if it is 16, we encode
// only the first 4
if off+net.IPv4len > lenmsg {
return lenmsg, &Error{err: "overflow packing a"}
}
switch fv.Len() {
case net.IPv6len:
msg[off] = byte(fv.Index(12).Uint())
msg[off+1] = byte(fv.Index(13).Uint())
msg[off+2] = byte(fv.Index(14).Uint())
msg[off+3] = byte(fv.Index(15).Uint())
off += net.IPv4len
case net.IPv4len:
msg[off] = byte(fv.Index(0).Uint())
msg[off+1] = byte(fv.Index(1).Uint())
msg[off+2] = byte(fv.Index(2).Uint())
msg[off+3] = byte(fv.Index(3).Uint())
off += net.IPv4len
case 0:
// Allowed, for dynamic updates
default:
return lenmsg, &Error{err: "overflow packing a"}
}
case `dns:"aaaa"`:
if val.Type().String() == "dns.IPSECKEY" {
// Field(2) is GatewayType, must be 2
if val.Field(2).Uint() != 2 {
continue
}
}
if fv.Len() == 0 {
break
}
if fv.Len() > net.IPv6len || off+fv.Len() > lenmsg {
return lenmsg, &Error{err: "overflow packing aaaa"}
}
for j := 0; j < net.IPv6len; j++ {
msg[off] = byte(fv.Index(j).Uint())
off++
}
case `dns:"wks"`:
// TODO(miek): this is wrong should be lenrd
if off == lenmsg {
break // dyn. updates
}
if val.Field(i).Len() == 0 {
break
}
off1 := off
for j := 0; j < val.Field(i).Len(); j++ {
serv := int(fv.Index(j).Uint())
if off+serv/8+1 > len(msg) {
return len(msg), &Error{err: "overflow packing wks"}
}
msg[off+serv/8] |= byte(1 << (7 - uint(serv%8)))
if off+serv/8+1 > off1 {
off1 = off + serv/8 + 1
}
}
off = off1
case `dns:"nsec"`: // NSEC/NSEC3
// This is the uint16 type bitmap
if val.Field(i).Len() == 0 {
// Do absolutely nothing
break
}
var lastwindow, lastlength uint16
for j := 0; j < val.Field(i).Len(); j++ {
t := uint16(fv.Index(j).Uint())
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 {
// New window, jump to the new offset
off += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
return len(msg), &Error{err: "nsec bits out of order"}
}
if off+2+int(length) > len(msg) {
return len(msg), &Error{err: "overflow packing nsec"}
}
// Setting the window #
msg[off] = byte(window)
// Setting the octets length
msg[off+1] = byte(length)
// Setting the bit value for the type in the right octet
msg[off+1+int(length)] |= byte(1 << (7 - (t % 8)))
lastwindow, lastlength = window, length
}
off += int(lastlength) + 2
}
case reflect.Struct:
off, err = packStructValue(fv, msg, off, compression, compress)
if err != nil {
return lenmsg, err
}
case reflect.Uint8:
if off+1 > lenmsg {
return lenmsg, &Error{err: "overflow packing uint8"}
}
msg[off] = byte(fv.Uint())
off++
case reflect.Uint16:
if off+2 > lenmsg {
return lenmsg, &Error{err: "overflow packing uint16"}
}
i := fv.Uint()
msg[off] = byte(i >> 8)
msg[off+1] = byte(i)
off += 2
case reflect.Uint32:
if off+4 > lenmsg {
return lenmsg, &Error{err: "overflow packing uint32"}
}
i := fv.Uint()
msg[off] = byte(i >> 24)
msg[off+1] = byte(i >> 16)
msg[off+2] = byte(i >> 8)
msg[off+3] = byte(i)
off += 4
case reflect.Uint64:
switch typefield.Tag {
default:
if off+8 > lenmsg {
return lenmsg, &Error{err: "overflow packing uint64"}
}
i := fv.Uint()
msg[off] = byte(i >> 56)
msg[off+1] = byte(i >> 48)
msg[off+2] = byte(i >> 40)
msg[off+3] = byte(i >> 32)
msg[off+4] = byte(i >> 24)
msg[off+5] = byte(i >> 16)
msg[off+6] = byte(i >> 8)
msg[off+7] = byte(i)
off += 8
case `dns:"uint48"`:
// Used in TSIG, where it stops at 48 bits, so we discard the upper 16
if off+6 > lenmsg {
return lenmsg, &Error{err: "overflow packing uint64 as uint48"}
}
i := fv.Uint()
msg[off] = byte(i >> 40)
msg[off+1] = byte(i >> 32)
msg[off+2] = byte(i >> 24)
msg[off+3] = byte(i >> 16)
msg[off+4] = byte(i >> 8)
msg[off+5] = byte(i)
off += 6
}
case reflect.String:
// There are multiple string encodings.
// The tag distinguishes ordinary strings from domain names.
s := fv.String()
switch typefield.Tag {
default:
return lenmsg, &Error{"bad tag packing string: " + typefield.Tag.Get("dns")}
case `dns:"base64"`:
b64, e := fromBase64([]byte(s))
if e != nil {
return lenmsg, e
}
copy(msg[off:off+len(b64)], b64)
off += len(b64)
case `dns:"domain-name"`:
if val.Type().String() == "dns.IPSECKEY" {
// Field(2) is GatewayType, 1 and 2 or used for addresses
x := val.Field(2).Uint()
if x == 1 || x == 2 {
continue
}
}
if off, err = PackDomainName(s, msg, off, compression, false && compress); err != nil {
return lenmsg, err
}
case `dns:"cdomain-name"`:
if off, err = PackDomainName(s, msg, off, compression, true && compress); err != nil {
return lenmsg, err
}
case `dns:"size-base32"`:
// This is purely for NSEC3 atm, the previous byte must
// holds the length of the encoded string. As NSEC3
// is only defined to SHA1, the hashlength is 20 (160 bits)
msg[off-1] = 20
fallthrough
case `dns:"base32"`:
b32, e := fromBase32([]byte(s))
if e != nil {
return lenmsg, e
}
copy(msg[off:off+len(b32)], b32)
off += len(b32)
case `dns:"size-hex"`:
fallthrough
case `dns:"hex"`:
// There is no length encoded here
h, e := hex.DecodeString(s)
if e != nil {
return lenmsg, e
}
if off+hex.DecodedLen(len(s)) > lenmsg {
return lenmsg, &Error{err: "overflow packing hex"}
}
copy(msg[off:off+hex.DecodedLen(len(s))], h)
off += hex.DecodedLen(len(s))
case `dns:"size"`:
// the size is already encoded in the RR, we can safely use the
// length of string. String is RAW (not encoded in hex, nor base64)
copy(msg[off:off+len(s)], s)
off += len(s)
case `dns:"octet"`:
bytesTmp := make([]byte, 256)
off, err = packOctetString(fv.String(), msg, off, bytesTmp)
if err != nil {
return lenmsg, err
}
case `dns:"txt"`:
fallthrough
case "":
if txtTmp == nil {
txtTmp = make([]byte, 256*4+1)
}
off, err = packTxtString(fv.String(), msg, off, txtTmp)
if err != nil {
return lenmsg, err
}
}
}
}
return off, nil
}
func structValue(any interface{}) reflect.Value {
return reflect.ValueOf(any).Elem()
}
// PackStruct packs any structure to wire format.
func PackStruct(any interface{}, msg []byte, off int) (off1 int, err error) {
off, err = packStructValue(structValue(any), msg, off, nil, false)
return off, err
}
func packStructCompress(any interface{}, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
off, err = packStructValue(structValue(any), msg, off, compression, compress)
return off, err
}
// Unpack a reflect.StructValue from msg.
// Same restrictions as packStructValue.
func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err error) {
lenmsg := len(msg)
for i := 0; i < val.NumField(); i++ {
if off > lenmsg {
return lenmsg, &Error{"bad offset unpacking"}
}
switch fv := val.Field(i); fv.Kind() {
default:
return lenmsg, &Error{err: "bad kind unpacking"}
case reflect.Interface:
// PrivateRR is the only RR implementation that has interface field.
// therefore it's expected that this interface would be PrivateRdata
switch data := fv.Interface().(type) {
case PrivateRdata:
n, err := data.Unpack(msg[off:])
if err != nil {
return lenmsg, err
}
off += n
default:
return lenmsg, &Error{err: "bad kind interface unpacking"}
}
case reflect.Slice:
switch val.Type().Field(i).Tag {
default:
return lenmsg, &Error{"bad tag unpacking slice: " + val.Type().Field(i).Tag.Get("dns")}
case `dns:"domain-name"`:
// HIP record slice of name (or none)
var servers []string
var s string
for off < lenmsg {
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return lenmsg, err
}
servers = append(servers, s)
}
fv.Set(reflect.ValueOf(servers))
case `dns:"txt"`:
if off == lenmsg {
break
}
var txt []string
txt, off, err = unpackTxt(msg, off)
if err != nil {
return lenmsg, err
}
fv.Set(reflect.ValueOf(txt))
case `dns:"opt"`: // edns0
if off == lenmsg {
// This is an EDNS0 (OPT Record) with no rdata
// We can safely return here.
break
}
var edns []EDNS0
Option:
code := uint16(0)
if off+4 > lenmsg {
return lenmsg, &Error{err: "overflow unpacking opt"}
}
code, off = unpackUint16(msg, off)
optlen, off1 := unpackUint16(msg, off)
if off1+int(optlen) > lenmsg {
return lenmsg, &Error{err: "overflow unpacking opt"}
}
switch code {
case EDNS0NSID:
e := new(EDNS0_NSID)
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
return lenmsg, err
}
edns = append(edns, e)
off = off1 + int(optlen)
case EDNS0SUBNET, EDNS0SUBNETDRAFT:
e := new(EDNS0_SUBNET)
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
return lenmsg, err
}
edns = append(edns, e)
off = off1 + int(optlen)
if code == EDNS0SUBNETDRAFT {
e.DraftOption = true
}
case EDNS0UL:
e := new(EDNS0_UL)
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
return lenmsg, err
}
edns = append(edns, e)
off = off1 + int(optlen)
case EDNS0LLQ:
e := new(EDNS0_LLQ)
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
return lenmsg, err
}
edns = append(edns, e)
off = off1 + int(optlen)
case EDNS0DAU:
e := new(EDNS0_DAU)
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
return lenmsg, err
}
edns = append(edns, e)
off = off1 + int(optlen)
case EDNS0DHU:
e := new(EDNS0_DHU)
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
return lenmsg, err
}
edns = append(edns, e)
off = off1 + int(optlen)
case EDNS0N3U:
e := new(EDNS0_N3U)
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
return lenmsg, err
}
edns = append(edns, e)
off = off1 + int(optlen)
default:
e := new(EDNS0_LOCAL)
e.Code = code
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
return lenmsg, err
}
edns = append(edns, e)
off = off1 + int(optlen)
}
if off < lenmsg {
goto Option
}
fv.Set(reflect.ValueOf(edns))
case `dns:"a"`:
if val.Type().String() == "dns.IPSECKEY" {
// Field(2) is GatewayType, must be 1
if val.Field(2).Uint() != 1 {
continue
}
}
if off == lenmsg {
break // dyn. update
}
if off+net.IPv4len > lenmsg {
return lenmsg, &Error{err: "overflow unpacking a"}
}
fv.Set(reflect.ValueOf(net.IPv4(msg[off], msg[off+1], msg[off+2], msg[off+3])))
off += net.IPv4len
case `dns:"aaaa"`:
if val.Type().String() == "dns.IPSECKEY" {
// Field(2) is GatewayType, must be 2
if val.Field(2).Uint() != 2 {
continue
}
}
if off == lenmsg {
break
}
if off+net.IPv6len > lenmsg {
return lenmsg, &Error{err: "overflow unpacking aaaa"}
}
fv.Set(reflect.ValueOf(net.IP{msg[off], msg[off+1], msg[off+2], msg[off+3], msg[off+4],
msg[off+5], msg[off+6], msg[off+7], msg[off+8], msg[off+9], msg[off+10],
msg[off+11], msg[off+12], msg[off+13], msg[off+14], msg[off+15]}))
off += net.IPv6len
case `dns:"wks"`:
// Rest of the record is the bitmap
var serv []uint16
j := 0
for off < lenmsg {
if off+1 > lenmsg {
return lenmsg, &Error{err: "overflow unpacking wks"}
}
b := msg[off]
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
serv = append(serv, uint16(j*8+0))
}
if b&0x40 == 0x40 {
serv = append(serv, uint16(j*8+1))
}
if b&0x20 == 0x20 {
serv = append(serv, uint16(j*8+2))
}
if b&0x10 == 0x10 {
serv = append(serv, uint16(j*8+3))
}
if b&0x8 == 0x8 {
serv = append(serv, uint16(j*8+4))
}
if b&0x4 == 0x4 {
serv = append(serv, uint16(j*8+5))
}
if b&0x2 == 0x2 {
serv = append(serv, uint16(j*8+6))
}
if b&0x1 == 0x1 {
serv = append(serv, uint16(j*8+7))
}
j++
off++
}
fv.Set(reflect.ValueOf(serv))
case `dns:"nsec"`: // NSEC/NSEC3
if off == len(msg) {
break
}
// Rest of the record is the type bitmap
var nsec []uint16
length := 0
window := 0
lastwindow := -1
for off < len(msg) {
if off+2 > len(msg) {
return len(msg), &Error{err: "overflow unpacking nsecx"}
}
window = int(msg[off])
length = int(msg[off+1])
off += 2
if window <= lastwindow {
// RFC 4034: Blocks are present in the NSEC RR RDATA in
// increasing numerical order.
return len(msg), &Error{err: "out of order NSEC block"}
}
if length == 0 {
// RFC 4034: Blocks with no types present MUST NOT be included.
return len(msg), &Error{err: "empty NSEC block"}
}
if length > 32 {
return len(msg), &Error{err: "NSEC block too long"}
}
if off+length > len(msg) {
return len(msg), &Error{err: "overflowing NSEC block"}
}
// Walk the bytes in the window and extract the type bits
for j := 0; j < length; j++ {
b := msg[off+j]
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0))
}
if b&0x40 == 0x40 {
nsec = append(nsec, uint16(window*256+j*8+1))
}
if b&0x20 == 0x20 {
nsec = append(nsec, uint16(window*256+j*8+2))
}
if b&0x10 == 0x10 {
nsec = append(nsec, uint16(window*256+j*8+3))
}
if b&0x8 == 0x8 {
nsec = append(nsec, uint16(window*256+j*8+4))
}
if b&0x4 == 0x4 {
nsec = append(nsec, uint16(window*256+j*8+5))
}
if b&0x2 == 0x2 {
nsec = append(nsec, uint16(window*256+j*8+6))
}
if b&0x1 == 0x1 {
nsec = append(nsec, uint16(window*256+j*8+7))
}
}
off += length
lastwindow = window
}
fv.Set(reflect.ValueOf(nsec))
}
case reflect.Struct:
off, err = unpackStructValue(fv, msg, off)
if err != nil {
return lenmsg, err
}
if val.Type().Field(i).Name == "Hdr" {
lenrd := off + int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
if lenrd > lenmsg {
return lenmsg, &Error{err: "overflowing header size"}
}
msg = msg[:lenrd]
lenmsg = len(msg)
}
case reflect.Uint8:
if off == lenmsg {
break
}
if off+1 > lenmsg {
return lenmsg, &Error{err: "overflow unpacking uint8"}
}
fv.SetUint(uint64(uint8(msg[off])))
off++
case reflect.Uint16:
if off == lenmsg {
break
}
var i uint16
if off+2 > lenmsg {
return lenmsg, &Error{err: "overflow unpacking uint16"}
}
i, off = unpackUint16(msg, off)
fv.SetUint(uint64(i))
case reflect.Uint32:
if off == lenmsg {
break
}
if off+4 > lenmsg {
return lenmsg, &Error{err: "overflow unpacking uint32"}
}
fv.SetUint(uint64(uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3])))
off += 4
case reflect.Uint64:
if off == lenmsg {
break
}
switch val.Type().Field(i).Tag {
default:
if off+8 > lenmsg {
return lenmsg, &Error{err: "overflow unpacking uint64"}
}
fv.SetUint(uint64(uint64(msg[off])<<56 | uint64(msg[off+1])<<48 | uint64(msg[off+2])<<40 |
uint64(msg[off+3])<<32 | uint64(msg[off+4])<<24 | uint64(msg[off+5])<<16 | uint64(msg[off+6])<<8 | uint64(msg[off+7])))
off += 8
case `dns:"uint48"`:
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
if off+6 > lenmsg {
return lenmsg, &Error{err: "overflow unpacking uint64 as uint48"}
}
fv.SetUint(uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
uint64(msg[off+4])<<8 | uint64(msg[off+5])))
off += 6
}
case reflect.String:
var s string
if off == lenmsg {
break
}
switch val.Type().Field(i).Tag {
default:
return lenmsg, &Error{"bad tag unpacking string: " + val.Type().Field(i).Tag.Get("dns")}
case `dns:"octet"`:
s = string(msg[off:])
off = lenmsg
case `dns:"hex"`:
hexend := lenmsg
if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) {
hexend = off + int(val.FieldByName("HitLength").Uint())
}
if hexend > lenmsg {
return lenmsg, &Error{err: "overflow unpacking HIP hex"}
}
s = hex.EncodeToString(msg[off:hexend])
off = hexend
case `dns:"base64"`:
// Rest of the RR is base64 encoded value
b64end := lenmsg
if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) {
b64end = off + int(val.FieldByName("PublicKeyLength").Uint())
}
if b64end > lenmsg {
return lenmsg, &Error{err: "overflow unpacking HIP base64"}
}
s = toBase64(msg[off:b64end])
off = b64end
case `dns:"cdomain-name"`:
fallthrough
case `dns:"domain-name"`:
if val.Type().String() == "dns.IPSECKEY" {
// Field(2) is GatewayType, 1 and 2 or used for addresses
x := val.Field(2).Uint()
if x == 1 || x == 2 {
continue
}
}
if off == lenmsg {
// zero rdata foo, OK for dyn. updates
break
}
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return lenmsg, err
}
case `dns:"size-base32"`:
var size int
switch val.Type().Name() {
case "NSEC3":
switch val.Type().Field(i).Name {
case "NextDomain":
name := val.FieldByName("HashLength")
size = int(name.Uint())
}
}
if off+size > lenmsg {
return lenmsg, &Error{err: "overflow unpacking base32"}
}
s = toBase32(msg[off : off+size])
off += size
case `dns:"size-hex"`:
// a "size" string, but it must be encoded in hex in the string
var size int
switch val.Type().Name() {
case "NSEC3":
switch val.Type().Field(i).Name {
case "Salt":
name := val.FieldByName("SaltLength")
size = int(name.Uint())
case "NextDomain":
name := val.FieldByName("HashLength")
size = int(name.Uint())
}
case "TSIG":
switch val.Type().Field(i).Name {
case "MAC":
name := val.FieldByName("MACSize")
size = int(name.Uint())
case "OtherData":
name := val.FieldByName("OtherLen")
size = int(name.Uint())
}
}
if off+size > lenmsg {
return lenmsg, &Error{err: "overflow unpacking hex"}
}
s = hex.EncodeToString(msg[off : off+size])
off += size
case `dns:"txt"`:
fallthrough
case "":
s, off, err = unpackTxtString(msg, off)
}
fv.SetString(s)
}
}
return off, nil
}
// Helpers for dealing with escaped bytes
func isDigit(b byte) bool { return b >= '0' && b <= '9' }
func dddToByte(s []byte) byte {
_ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}
// UnpackStruct unpacks a binary message from offset off to the interface
// value given.
func UnpackStruct(any interface{}, msg []byte, off int) (int, error) {
return unpackStructValue(structValue(any), msg, off)
func dddStringToByte(s string) byte {
_ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}
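For example, the presentation-format escape \065 decodes to 0*100 + 6*10 + 5 = 65, i.e. the byte 'A'.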
// Helper function for packing and unpacking
......@@ -1402,101 +572,110 @@ func intToBytes(i *big.Int, length int) []byte {
return buf
}
func unpackUint16(msg []byte, off int) (uint16, int) {
return uint16(msg[off])<<8 | uint16(msg[off+1]), off + 2
}
func packUint16(i uint16) (byte, byte) {
return byte(i >> 8), byte(i)
}
func toBase32(b []byte) string {
return base32.HexEncoding.EncodeToString(b)
}
func fromBase32(s []byte) (buf []byte, err error) {
buflen := base32.HexEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base32.HexEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase64(b []byte) string {
return base64.StdEncoding.EncodeToString(b)
}
func fromBase64(s []byte) (buf []byte, err error) {
buflen := base64.StdEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base64.StdEncoding.Decode(buf, s)
buf = buf[:n]
return
}
// PackRR packs a resource record rr into msg[off:].
// See PackDomainName for documentation about the compression.
func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress)
if err == nil {
// packRR no longer sets the Rdlength field on the rr, but
// callers might be expecting it so we set it here.
rr.Header().Rdlength = uint16(off1 - headerEnd)
}
return off1, err
}
func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) {
if rr == nil {
return len(msg), &Error{err: "nil rr"}
return len(msg), len(msg), &Error{err: "nil rr"}
}
off1, err = packStructCompress(rr, msg, off, compression, compress)
headerEnd, err = rr.Header().packHeader(msg, off, compression, compress)
if err != nil {
return len(msg), err
return headerEnd, len(msg), err
}
if rawSetRdlength(msg, off, off1) {
return off1, nil
off1, err = rr.pack(msg, headerEnd, compression, compress)
if err != nil {
return headerEnd, len(msg), err
}
rdlength := off1 - headerEnd
if int(uint16(rdlength)) != rdlength { // overflow
return headerEnd, len(msg), ErrRdata
}
return off, ErrRdata
// The RDLENGTH field is the last field in the header and we set it here.
binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength))
return headerEnd, off1, nil
}
// UnpackRR unpacks msg[off:] into an RR.
func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
// unpack just the header, to find the rr type and length
var h RR_Header
off0 := off
if off, err = UnpackStruct(&h, msg, off); err != nil {
h, off, msg, err := unpackHeader(msg, off)
if err != nil {
return nil, len(msg), err
}
end := off + int(h.Rdlength)
// make an rr of that type and re-unpack.
mk, known := typeToRR[h.Rrtype]
if !known {
rr = new(RFC3597)
return UnpackRRWithHeader(h, msg, off)
}
// UnpackRRWithHeader unpacks the record type specific payload given an existing
// RR_Header.
func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) {
if newFn, ok := TypeToRR[h.Rrtype]; ok {
rr = newFn()
*rr.Header() = h
} else {
rr = mk()
rr = &RFC3597{Hdr: h}
}
off, err = UnpackStruct(rr, msg, off0)
if off != end {
if off < 0 || off > len(msg) {
return &h, off, &Error{err: "bad off"}
}
end := off + int(h.Rdlength)
if end < off || end > len(msg) {
return &h, end, &Error{err: "bad rdlength"}
}
return rr, off, err
}
// Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 {
n := make(map[string]uint8)
for u, s := range m {
n[s] = u
if noRdata(h) {
return rr, off, nil
}
return n
}
func reverseInt16(m map[uint16]string) map[string]uint16 {
n := make(map[string]uint16)
for u, s := range m {
n[s] = u
off, err = rr.unpack(msg, off)
if err != nil {
return nil, end, err
}
if off != end {
return &h, end, &Error{err: "bad rdlength"}
}
return n
return rr, off, nil
}
func reverseInt(m map[int]string) map[string]int {
n := make(map[string]int)
for u, s := range m {
n[s] = u
// unpackRRslice unpacks msg[off:] into an []RR.
// If we cannot unpack the whole array, then it will return nil
func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) {
var r RR
// Don't pre-allocate, l may be under attacker control
var dst []RR
for i := 0; i < l; i++ {
off1 := off
r, off, err = UnpackRR(msg, off)
if err != nil {
off = len(msg)
break
}
// If offset does not increase anymore, l is a lie
if off1 == off {
break
}
dst = append(dst, r)
}
if err != nil && off == len(msg) {
dst = nil
}
return n
return dst, off, err
}
// Convert a MsgHdr to a string, with dig-like headers:
......@@ -1549,31 +728,37 @@ func (dns *Msg) Pack() (msg []byte, err error) {
return dns.PackBuffer(nil)
}
// PackBuffer packs a Msg, using the given buffer buf. If buf is too small
// a new buffer is allocated.
// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated.
func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
var dh Header
var compression map[string]int
if dns.Compress {
compression = make(map[string]int) // Compression pointer mappings
// If this message can't be compressed, avoid filling the
// compression map and creating garbage.
if dns.Compress && dns.isCompressible() {
compression := make(map[string]uint16) // Compression pointer mappings.
return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true)
}
return dns.packBufferWithCompressionMap(buf, compressionMap{}, false)
}
// packBufferWithCompressionMap packs a Msg, using the given buffer buf.
func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) {
if dns.Rcode < 0 || dns.Rcode > 0xFFF {
return nil, ErrRcode
}
if dns.Rcode > 0xF {
// Regular RCODE field is 4 bits
opt := dns.IsEdns0()
if opt == nil {
return nil, ErrExtendedRcode
}
opt.SetExtendedRcode(uint8(dns.Rcode >> 4))
dns.Rcode &= 0xF
// Set extended rcode unconditionally if we have an opt, this will allow
// resetting the extended rcode bits if they need to be.
if opt := dns.IsEdns0(); opt != nil {
opt.SetExtendedRcode(uint16(dns.Rcode))
} else if dns.Rcode > 0xF {
// If Rcode is an extended one and opt is nil, error out.
return nil, ErrExtendedRcode
}
// Convert convenient Msg into wire-like Header.
var dh Header
dh.Id = dns.Id
dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode)
dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF)
if dns.Response {
dh.Bits |= _QR
}
......@@ -1599,52 +784,44 @@ func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
dh.Bits |= _CD
}
// Prepare variable sized arrays.
question := dns.Question
answer := dns.Answer
ns := dns.Ns
extra := dns.Extra
dh.Qdcount = uint16(len(question))
dh.Ancount = uint16(len(answer))
dh.Nscount = uint16(len(ns))
dh.Arcount = uint16(len(extra))
dh.Qdcount = uint16(len(dns.Question))
dh.Ancount = uint16(len(dns.Answer))
dh.Nscount = uint16(len(dns.Ns))
dh.Arcount = uint16(len(dns.Extra))
// We need the uncompressed length here, because we first pack it and then compress it.
msg = buf
compress := dns.Compress
dns.Compress = false
if packLen := dns.Len() + 1; len(msg) < packLen {
uncompressedLen := msgLenWithCompressionMap(dns, nil)
if packLen := uncompressedLen + 1; len(msg) < packLen {
msg = make([]byte, packLen)
}
dns.Compress = compress
// Pack it in: header and then the pieces.
off := 0
off, err = packStructCompress(&dh, msg, off, compression, dns.Compress)
off, err = dh.pack(msg, off, compression, compress)
if err != nil {
return nil, err
}
for i := 0; i < len(question); i++ {
off, err = packStructCompress(&question[i], msg, off, compression, dns.Compress)
for _, r := range dns.Question {
off, err = r.pack(msg, off, compression, compress)
if err != nil {
return nil, err
}
}
for i := 0; i < len(answer); i++ {
off, err = PackRR(answer[i], msg, off, compression, dns.Compress)
for _, r := range dns.Answer {
_, off, err = packRR(r, msg, off, compression, compress)
if err != nil {
return nil, err
}
}
for i := 0; i < len(ns); i++ {
off, err = PackRR(ns[i], msg, off, compression, dns.Compress)
for _, r := range dns.Ns {
_, off, err = packRR(r, msg, off, compression, compress)
if err != nil {
return nil, err
}
}
for i := 0; i < len(extra); i++ {
off, err = PackRR(extra[i], msg, off, compression, dns.Compress)
for _, r := range dns.Extra {
_, off, err = packRR(r, msg, off, compression, compress)
if err != nil {
return nil, err
}
......@@ -1652,36 +829,24 @@ func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
return msg[:off], nil
}
// Unpack unpacks a binary message to a Msg structure.
func (dns *Msg) Unpack(msg []byte) (err error) {
// Header.
var dh Header
off := 0
if off, err = UnpackStruct(&dh, msg, off); err != nil {
return err
func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
// If we are at the end of the message we should return *just* the
// header. This can still be useful to the caller. 9.9.9.9 sends these
// when responding with REFUSED for instance.
if off == len(msg) {
// reset sections before returning
dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil
return nil
}
dns.Id = dh.Id
dns.Response = (dh.Bits & _QR) != 0
dns.Opcode = int(dh.Bits>>11) & 0xF
dns.Authoritative = (dh.Bits & _AA) != 0
dns.Truncated = (dh.Bits & _TC) != 0
dns.RecursionDesired = (dh.Bits & _RD) != 0
dns.RecursionAvailable = (dh.Bits & _RA) != 0
dns.Zero = (dh.Bits & _Z) != 0
dns.AuthenticatedData = (dh.Bits & _AD) != 0
dns.CheckingDisabled = (dh.Bits & _CD) != 0
dns.Rcode = int(dh.Bits & 0xF)
// Don't pre-alloc these arrays, the incoming lengths are from the network.
dns.Question = make([]Question, 0, 1)
dns.Answer = make([]RR, 0, 10)
dns.Ns = make([]RR, 0, 10)
dns.Extra = make([]RR, 0, 10)
var q Question
// Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are
// attacker controlled. This means we can't use them to pre-allocate
// slices.
dns.Question = nil
for i := 0; i < int(dh.Qdcount); i++ {
off1 := off
off, err = UnpackStruct(&q, msg, off)
var q Question
q, off, err = unpackQuestion(msg, off)
if err != nil {
return err
}
......@@ -1689,67 +854,46 @@ func (dns *Msg) Unpack(msg []byte) (err error) {
dh.Qdcount = uint16(i)
break
}
dns.Question = append(dns.Question, q)
}
// If we see a TC bit being set we return here, without
// an error, because technically it isn't an error. So return
// without parsing the potentially corrupt packet and hitting an error.
// TODO(miek): this isn't the best strategy!
// Better strategy would be: set a boolean indicating a truncated message, go forth and parse
// until we hit an error, return the message without the latest parsed rr if this boolean
// is true.
if dns.Truncated {
dns.Answer = nil
dns.Ns = nil
dns.Extra = nil
return nil
}
var r RR
for i := 0; i < int(dh.Ancount); i++ {
off1 := off
r, off, err = UnpackRR(msg, off)
if err != nil {
return err
}
if off1 == off { // Offset does not increase anymore, dh.Ancount is a lie!
dh.Ancount = uint16(i)
break
}
dns.Answer = append(dns.Answer, r)
dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off)
// The header counts might have been wrong so we need to update it
dh.Ancount = uint16(len(dns.Answer))
if err == nil {
dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off)
}
for i := 0; i < int(dh.Nscount); i++ {
off1 := off
r, off, err = UnpackRR(msg, off)
if err != nil {
return err
}
if off1 == off { // Offset does not increase anymore, dh.Nscount is a lie!
dh.Nscount = uint16(i)
break
}
dns.Ns = append(dns.Ns, r)
// The header counts might have been wrong so we need to update it
dh.Nscount = uint16(len(dns.Ns))
if err == nil {
dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
}
for i := 0; i < int(dh.Arcount); i++ {
off1 := off
r, off, err = UnpackRR(msg, off)
if err != nil {
return err
}
if off1 == off { // Offset does not increase anymore, dh.Arcount is a lie!
dh.Arcount = uint16(i)
break
}
dns.Extra = append(dns.Extra, r)
// The header counts might have been wrong so we need to update it
dh.Arcount = uint16(len(dns.Extra))
// Set extended Rcode
if opt := dns.IsEdns0(); opt != nil {
dns.Rcode |= opt.ExtendedRcode()
}
if off != len(msg) {
// TODO(miek) make this an error?
// use PackOpt to let people tell how detailed the error reporting should be?
// println("dns: extra bytes in dns packet", off, "<", len(msg))
}
return nil
return err
}
// Unpack unpacks a binary message to a Msg structure.
func (dns *Msg) Unpack(msg []byte) (err error) {
dh, off, err := unpackMsgHdr(msg, 0)
if err != nil {
return err
}
dns.setHdr(dh)
return dns.unpack(dh, msg, off)
}
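// Illustrative sketch, not part of the original file: a Pack/Unpack round
// trip using the exported API above. The function name examplePackUnpack and
// the query contents are made up.
func examplePackUnpack() error {
	m := new(Msg)
	m.Id = 42
	m.RecursionDesired = true
	m.Question = []Question{{Name: "example.org.", Qtype: TypeA, Qclass: ClassINET}}

	wire, err := m.Pack()
	if err != nil {
		return err
	}

	var r Msg
	return r.Unpack(wire) // r now mirrors m
}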
// Convert a complete message to a string with dig-like output.
......@@ -1762,229 +906,158 @@ func (dns *Msg) String() string {
s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
opt := dns.IsEdns0()
if opt != nil {
// OPT PSEUDOSECTION
s += opt.String() + "\n"
}
if len(dns.Question) > 0 {
s += "\n;; QUESTION SECTION:\n"
for i := 0; i < len(dns.Question); i++ {
s += dns.Question[i].String() + "\n"
for _, r := range dns.Question {
s += r.String() + "\n"
}
}
if len(dns.Answer) > 0 {
s += "\n;; ANSWER SECTION:\n"
for i := 0; i < len(dns.Answer); i++ {
if dns.Answer[i] != nil {
s += dns.Answer[i].String() + "\n"
for _, r := range dns.Answer {
if r != nil {
s += r.String() + "\n"
}
}
}
if len(dns.Ns) > 0 {
s += "\n;; AUTHORITY SECTION:\n"
for i := 0; i < len(dns.Ns); i++ {
if dns.Ns[i] != nil {
s += dns.Ns[i].String() + "\n"
for _, r := range dns.Ns {
if r != nil {
s += r.String() + "\n"
}
}
}
if len(dns.Extra) > 0 {
if len(dns.Extra) > 0 && (opt == nil || len(dns.Extra) > 1) {
s += "\n;; ADDITIONAL SECTION:\n"
for i := 0; i < len(dns.Extra); i++ {
if dns.Extra[i] != nil {
s += dns.Extra[i].String() + "\n"
for _, r := range dns.Extra {
if r != nil && r.Header().Rrtype != TypeOPT {
s += r.String() + "\n"
}
}
}
return s
}
// isCompressible returns whether the msg may be compressible.
func (dns *Msg) isCompressible() bool {
// If we only have one question, there is nothing we can ever compress.
return len(dns.Question) > 1 || len(dns.Answer) > 0 ||
len(dns.Ns) > 0 || len(dns.Extra) > 0
}
// Len returns the message length when in (un)compressed wire format.
// If dns.Compress is true compression is taken into account. Len()
// is provided to be a faster way to get the size of the resulting packet,
// than packing it, measuring the size and discarding the buffer.
func (dns *Msg) Len() int {
// We always return one more than needed.
l := 12 // Message header is always 12 bytes
var compression map[string]int
if dns.Compress {
compression = make(map[string]int)
}
for i := 0; i < len(dns.Question); i++ {
l += dns.Question[i].len()
if dns.Compress {
compressionLenHelper(compression, dns.Question[i].Name)
}
// If this message can't be compressed, avoid filling the
// compression map and creating garbage.
if dns.Compress && dns.isCompressible() {
compression := make(map[string]struct{})
return msgLenWithCompressionMap(dns, compression)
}
for i := 0; i < len(dns.Answer); i++ {
l += dns.Answer[i].len()
if dns.Compress {
k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name)
if ok {
l += 1 - k
}
compressionLenHelper(compression, dns.Answer[i].Header().Name)
k, ok = compressionLenSearchType(compression, dns.Answer[i])
if ok {
l += 1 - k
}
compressionLenHelperType(compression, dns.Answer[i])
return msgLenWithCompressionMap(dns, nil)
}
func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int {
l := headerSize
for _, r := range dns.Question {
l += r.len(l, compression)
}
for _, r := range dns.Answer {
if r != nil {
l += r.len(l, compression)
}
}
for i := 0; i < len(dns.Ns); i++ {
l += dns.Ns[i].len()
if dns.Compress {
k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name)
if ok {
l += 1 - k
}
compressionLenHelper(compression, dns.Ns[i].Header().Name)
k, ok = compressionLenSearchType(compression, dns.Ns[i])
if ok {
l += 1 - k
}
compressionLenHelperType(compression, dns.Ns[i])
for _, r := range dns.Ns {
if r != nil {
l += r.len(l, compression)
}
}
for i := 0; i < len(dns.Extra); i++ {
l += dns.Extra[i].len()
if dns.Compress {
k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name)
if ok {
l += 1 - k
}
compressionLenHelper(compression, dns.Extra[i].Header().Name)
k, ok = compressionLenSearchType(compression, dns.Extra[i])
if ok {
l += 1 - k
}
compressionLenHelperType(compression, dns.Extra[i])
for _, r := range dns.Extra {
if r != nil {
l += r.len(l, compression)
}
}
return l
}
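// Illustrative sketch, not part of the original file: Len can be used to
// pre-size the buffer handed to PackBuffer so that, with Compress left at
// false, no second allocation is needed. exampleLenPackBuffer is a
// hypothetical name.
func exampleLenPackBuffer(m *Msg) ([]byte, error) {
	buf := make([]byte, m.Len()+1) // Len returns the uncompressed size when Compress is false
	return m.PackBuffer(buf)
}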
// Put the parts of the name in the compression map.
func compressionLenHelper(c map[string]int, s string) {
pref := ""
lbs := Split(s)
for j := len(lbs) - 1; j >= 0; j-- {
pref = s[lbs[j]:]
if _, ok := c[pref]; !ok {
c[pref] = len(pref)
func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int {
if s == "" || s == "." {
return 1
}
escaped := strings.Contains(s, "\\")
if compression != nil && (compress || off < maxCompressionOffset) {
// compressionLenSearch will insert the entry into the compression
// map if it doesn't contain it.
if l, ok := compressionLenSearch(compression, s, off); ok && compress {
if escaped {
return escapedNameLen(s[:l]) + 2
}
return l + 2
}
}
}
// Look for each part in the compression map and return its length,
// keep on searching so we get the longest match.
func compressionLenSearch(c map[string]int, s string) (int, bool) {
off := 0
end := false
if s == "" { // don't bork on bogus data
return 0, false
if escaped {
return escapedNameLen(s) + 1
}
for {
if _, ok := c[s[off:]]; ok {
return len(s[off:]), true
return len(s) + 1
}
func escapedNameLen(s string) int {
nameLen := len(s)
for i := 0; i < len(s); i++ {
if s[i] != '\\' {
continue
}
if end {
break
if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
nameLen -= 3
i += 3
} else {
nameLen--
i++
}
off, end = NextLabel(s, off)
}
return 0, false
}
// TODO(miek): should add all types, because they all can be *used* for compression.
func compressionLenHelperType(c map[string]int, r RR) {
switch x := r.(type) {
case *NS:
compressionLenHelper(c, x.Ns)
case *MX:
compressionLenHelper(c, x.Mx)
case *CNAME:
compressionLenHelper(c, x.Target)
case *PTR:
compressionLenHelper(c, x.Ptr)
case *SOA:
compressionLenHelper(c, x.Ns)
compressionLenHelper(c, x.Mbox)
case *MB:
compressionLenHelper(c, x.Mb)
case *MG:
compressionLenHelper(c, x.Mg)
case *MR:
compressionLenHelper(c, x.Mr)
case *MF:
compressionLenHelper(c, x.Mf)
case *MD:
compressionLenHelper(c, x.Md)
case *RT:
compressionLenHelper(c, x.Host)
case *MINFO:
compressionLenHelper(c, x.Rmail)
compressionLenHelper(c, x.Email)
case *AFSDB:
compressionLenHelper(c, x.Hostname)
}
return nameLen
}
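// Illustrative sketch, not part of the original file: what escapedNameLen
// computes for names containing escapes. exampleEscapedLen is a hypothetical
// name; the raw strings contain literal backslashes.
func exampleEscapedLen() bool {
	// `a\.b` is 4 presentation characters but 3 wire octets: 'a', '.', 'b'.
	// `a\065b` is 6 presentation characters but 3 wire octets: 'a', 'A', 'b'.
	return escapedNameLen(`a\.b`) == 3 && escapedNameLen(`a\065b`) == 3
}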
// Only search on compressing these types.
func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
switch x := r.(type) {
case *NS:
return compressionLenSearch(c, x.Ns)
case *MX:
return compressionLenSearch(c, x.Mx)
case *CNAME:
return compressionLenSearch(c, x.Target)
case *PTR:
return compressionLenSearch(c, x.Ptr)
case *SOA:
k, ok := compressionLenSearch(c, x.Ns)
k1, ok1 := compressionLenSearch(c, x.Mbox)
if !ok && !ok1 {
return 0, false
func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) {
for off, end := 0, false; !end; off, end = NextLabel(s, off) {
if _, ok := c[s[off:]]; ok {
return off, true
}
return k + k1, true
case *MB:
return compressionLenSearch(c, x.Mb)
case *MG:
return compressionLenSearch(c, x.Mg)
case *MR:
return compressionLenSearch(c, x.Mr)
case *MF:
return compressionLenSearch(c, x.Mf)
case *MD:
return compressionLenSearch(c, x.Md)
case *RT:
return compressionLenSearch(c, x.Host)
case *MINFO:
k, ok := compressionLenSearch(c, x.Rmail)
k1, ok1 := compressionLenSearch(c, x.Email)
if !ok && !ok1 {
return 0, false
if msgOff+off < maxCompressionOffset {
c[s[off:]] = struct{}{}
}
return k + k1, true
case *AFSDB:
return compressionLenSearch(c, x.Hostname)
}
return 0, false
}
// id returns a 16-bit random number to be used as a
// message id. The randomness provided should be good enough.
func id() uint16 {
return uint16(rand.Int()) ^ uint16(time.Now().Nanosecond())
return 0, false
}
// Copy returns a new RR which is a deep-copy of r.
func Copy(r RR) RR {
r1 := r.copy()
return r1
}
func Copy(r RR) RR { return r.copy() }
// Len returns the length (in octets) of the uncompressed RR in wire format.
func Len(r RR) int { return r.len(0, nil) }
// Copy returns a new *Msg which is a deep-copy of dns.
func (dns *Msg) Copy() *Msg {
return dns.CopyTo(new(Msg))
}
func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) }
// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
func (dns *Msg) CopyTo(r1 *Msg) *Msg {
......@@ -1997,34 +1070,138 @@ func (dns *Msg) CopyTo(r1 *Msg) *Msg {
}
rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
var rri int
r1.Answer, rrArr = rrArr[:0:len(dns.Answer)], rrArr[len(dns.Answer):]
r1.Ns, rrArr = rrArr[:0:len(dns.Ns)], rrArr[len(dns.Ns):]
r1.Extra = rrArr[:0:len(dns.Extra)]
if len(dns.Answer) > 0 {
rrbegin := rri
for i := 0; i < len(dns.Answer); i++ {
rrArr[rri] = dns.Answer[i].copy()
rri++
}
r1.Answer = rrArr[rrbegin:rri:rri]
for _, r := range dns.Answer {
r1.Answer = append(r1.Answer, r.copy())
}
if len(dns.Ns) > 0 {
rrbegin := rri
for i := 0; i < len(dns.Ns); i++ {
rrArr[rri] = dns.Ns[i].copy()
rri++
}
r1.Ns = rrArr[rrbegin:rri:rri]
for _, r := range dns.Ns {
r1.Ns = append(r1.Ns, r.copy())
}
if len(dns.Extra) > 0 {
rrbegin := rri
for i := 0; i < len(dns.Extra); i++ {
rrArr[rri] = dns.Extra[i].copy()
rri++
}
r1.Extra = rrArr[rrbegin:rri:rri]
for _, r := range dns.Extra {
r1.Extra = append(r1.Extra, r.copy())
}
return r1
}
func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
off, err := packDomainName(q.Name, msg, off, compression, compress)
if err != nil {
return off, err
}
off, err = packUint16(q.Qtype, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(q.Qclass, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func unpackQuestion(msg []byte, off int) (Question, int, error) {
var (
q Question
err error
)
q.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
return q, off, err
}
if off == len(msg) {
return q, off, nil
}
q.Qtype, off, err = unpackUint16(msg, off)
if err != nil {
return q, off, err
}
if off == len(msg) {
return q, off, nil
}
q.Qclass, off, err = unpackUint16(msg, off)
if off == len(msg) {
return q, off, nil
}
return q, off, err
}
func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
off, err := packUint16(dh.Id, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Bits, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Qdcount, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Ancount, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Nscount, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Arcount, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
var (
dh Header
err error
)
dh.Id, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Bits, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Qdcount, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Ancount, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Nscount, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Arcount, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
return dh, off, nil
}
// setHdr sets the header in dns using the binary data in dh.
func (dns *Msg) setHdr(dh Header) {
dns.Id = dh.Id
dns.Response = dh.Bits&_QR != 0
dns.Opcode = int(dh.Bits>>11) & 0xF
dns.Authoritative = dh.Bits&_AA != 0
dns.Truncated = dh.Bits&_TC != 0
dns.RecursionDesired = dh.Bits&_RD != 0
dns.RecursionAvailable = dh.Bits&_RA != 0
dns.Zero = dh.Bits&_Z != 0 // _Z covers the zero bit, which should be zero; not sure why we set it to the opposite.
dns.AuthenticatedData = dh.Bits&_AD != 0
dns.CheckingDisabled = dh.Bits&_CD != 0
dns.Rcode = int(dh.Bits & 0xF)
}
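// Illustrative sketch, not part of the original file: peeking at just the
// fixed 12-octet header of a wire message without unpacking any records.
// exampleHeaderOnly is a hypothetical name.
func exampleHeaderOnly(wire []byte) (id uint16, response bool, err error) {
	dh, _, err := unpackMsgHdr(wire, 0)
	if err != nil {
		return 0, false, err
	}
	var m Msg
	m.setHdr(dh) // copy the bit field into the convenience booleans
	return m.Id, m.Response, nil
}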
package dns
import (
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"net"
"sort"
"strings"
)
// helper functions called from the generated zmsg.go
// These functions are named after the tag they help pack/unpack; if there is no tag it is the name
// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or
// packDataDomainName.
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv4len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking a"}
}
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
off += net.IPv4len
return a, off, nil
}
func packDataA(a net.IP, msg []byte, off int) (int, error) {
switch len(a) {
case net.IPv4len, net.IPv6len:
// It must be a slice of 4, even if it is 16; we encode only the IPv4 part (To4).
if off+net.IPv4len > len(msg) {
return len(msg), &Error{err: "overflow packing a"}
}
copy(msg[off:], a.To4())
off += net.IPv4len
case 0:
// Allowed, for dynamic updates.
default:
return len(msg), &Error{err: "overflow packing a"}
}
return off, nil
}
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv6len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
}
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
off += net.IPv6len
return aaaa, off, nil
}
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
switch len(aaaa) {
case net.IPv6len:
if off+net.IPv6len > len(msg) {
return len(msg), &Error{err: "overflow packing aaaa"}
}
copy(msg[off:], aaaa)
off += net.IPv6len
case 0:
// Allowed, dynamic updates.
default:
return len(msg), &Error{err: "overflow packing aaaa"}
}
return off, nil
}
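// Illustrative sketch, not part of the original file: packDataA accepts both
// 4- and 16-byte slices; net.ParseIP returns the 16-byte form even for IPv4
// and only the four significant octets are written. exampleA is a
// hypothetical name.
func exampleA() ([]byte, error) {
	buf := make([]byte, net.IPv4len)
	ip := net.ParseIP("192.0.2.1") // 16-byte representation
	if _, err := packDataA(ip, buf, 0); err != nil {
		return nil, err
	}
	return buf, nil // 0xC0 0x00 0x02 0x01
}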
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
// re-sliced msg according to the expected length of the RR.
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
hdr := RR_Header{}
if off == len(msg) {
return hdr, off, msg, nil
}
hdr.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rrtype, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Class, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Ttl, off, err = unpackUint32(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rdlength, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
return hdr, off, msg, err
}
// packHeader packs an RR header, returning the offset to the end of the header.
// See PackDomainName for documentation about the compression.
func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
if off == len(msg) {
return off, nil
}
off, err := packDomainName(hdr.Name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Rrtype, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Class, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint32(hdr.Ttl, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR.
if err != nil {
return len(msg), err
}
return off, nil
}
// helper helper functions.
// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
// Returns an error if msg is smaller than the expected size.
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
lenrd := off + int(rdlength)
if lenrd > len(msg) {
return msg, &Error{err: "overflowing header size"}
}
return msg[:lenrd], nil
}
var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
func fromBase32(s []byte) (buf []byte, err error) {
for i, b := range s {
if b >= 'a' && b <= 'z' {
s[i] = b - 32
}
}
buflen := base32HexNoPadEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base32HexNoPadEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase32(b []byte) string {
return base32HexNoPadEncoding.EncodeToString(b)
}
func fromBase64(s []byte) (buf []byte, err error) {
buflen := base64.StdEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base64.StdEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
// noRdata returns true if the Rdlength is zero, i.e. a dynamic update RR.
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
if off+1 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
}
return msg[off], off + 1, nil
}
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
if off+1 > len(msg) {
return len(msg), &Error{err: "overflow packing uint8"}
}
msg[off] = i
return off + 1, nil
}
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
if off+2 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
}
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
}
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
if off+2 > len(msg) {
return len(msg), &Error{err: "overflow packing uint16"}
}
binary.BigEndian.PutUint16(msg[off:], i)
return off + 2, nil
}
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
if off+4 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
}
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
}
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
if off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing uint32"}
}
binary.BigEndian.PutUint32(msg[off:], i)
return off + 4, nil
}
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
if off+6 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
}
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
uint64(msg[off+4])<<8 | uint64(msg[off+5])
off += 6
return i, off, nil
}
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
if off+6 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
}
msg[off] = byte(i >> 40)
msg[off+1] = byte(i >> 32)
msg[off+2] = byte(i >> 24)
msg[off+3] = byte(i >> 16)
msg[off+4] = byte(i >> 8)
msg[off+5] = byte(i)
off += 6
return off, nil
}
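// Illustrative sketch, not part of the original file: the 48-bit helpers are
// used for the TSIG "time signed" field; anything above 48 bits is dropped.
// exampleUint48RoundTrip is a hypothetical name.
func exampleUint48RoundTrip(t uint64) (uint64, error) {
	buf := make([]byte, 6)
	if _, err := packUint48(t, buf, 0); err != nil {
		return 0, err
	}
	v, _, err := unpackUint48(buf, 0)
	return v, err // v == t & 0xFFFFFFFFFFFF
}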
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
if off+8 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
}
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
}
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
if off+8 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64"}
}
binary.BigEndian.PutUint64(msg[off:], i)
off += 8
return off, nil
}
func unpackString(msg []byte, off int) (string, int, error) {
if off+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
l := int(msg[off])
off++
if off+l > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
var s strings.Builder
consumed := 0
for i, b := range msg[off : off+l] {
switch {
case b == '"' || b == '\\':
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteByte('\\')
s.WriteByte(b)
consumed = i + 1
case b < ' ' || b > '~': // unprintable
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteString(escapeByte(b))
consumed = i + 1
}
}
if consumed == 0 { // no escaping needed
return string(msg[off : off+l]), off + l, nil
}
s.Write(msg[off+consumed : off+l])
return s.String(), off + l, nil
}
func packString(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packTxtString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base32"}
}
s := toBase32(msg[off:end])
return s, end, nil
}
func packStringBase32(s string, msg []byte, off int) (int, error) {
b32, err := fromBase32([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b32) > len(msg) {
return len(msg), &Error{err: "overflow packing base32"}
}
copy(msg[off:off+len(b32)], b32)
off += len(b32)
return off, nil
}
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is base64 encoded value, so we don't need an explicit length
// to be set. Thus far all RRs that have base64 encoded fields have those as their
// last one. What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base64"}
}
s := toBase64(msg[off:end])
return s, end, nil
}
func packStringBase64(s string, msg []byte, off int) (int, error) {
b64, err := fromBase64([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b64) > len(msg) {
return len(msg), &Error{err: "overflow packing base64"}
}
copy(msg[off:off+len(b64)], b64)
off += len(b64)
return off, nil
}
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is hex encoded value, so we don't need an explicit length
// to be set. NSEC and TSIG have hex fields with a length field.
// What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking hex"}
}
s := hex.EncodeToString(msg[off:end])
return s, end, nil
}
func packStringHex(s string, msg []byte, off int) (int, error) {
h, err := hex.DecodeString(s)
if err != nil {
return len(msg), err
}
if off+len(h) > len(msg) {
return len(msg), &Error{err: "overflow packing hex"}
}
copy(msg[off:off+len(h)], h)
off += len(h)
return off, nil
}
func unpackStringAny(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking anything"}
}
return string(msg[off:end]), end, nil
}
func packStringAny(s string, msg []byte, off int) (int, error) {
if off+len(s) > len(msg) {
return len(msg), &Error{err: "overflow packing anything"}
}
copy(msg[off:off+len(s)], s)
off += len(s)
return off, nil
}
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
txt, off, err := unpackTxt(msg, off)
if err != nil {
return nil, len(msg), err
}
return txt, off, nil
}
func packStringTxt(s []string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1) // If the whole string consists of \DDD escapes we need this many.
off, err := packTxt(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
var edns []EDNS0
Option:
var code uint16
if off+4 > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
code = binary.BigEndian.Uint16(msg[off:])
off += 2
optlen := binary.BigEndian.Uint16(msg[off:])
off += 2
if off+int(optlen) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
e := makeDataOpt(code)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
if off < len(msg) {
goto Option
}
return edns, off, nil
}
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
for _, el := range options {
b, err := el.pack()
if err != nil || off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
off += 4
if off+len(b) > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
// Actual data
copy(msg[off:off+len(b)], b)
off += len(b)
}
return off, nil
}
func unpackStringOctet(msg []byte, off int) (string, int, error) {
s := string(msg[off:])
return s, len(msg), nil
}
func packStringOctet(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packOctetString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
var nsec []uint16
length, window, lastwindow := 0, 0, -1
for off < len(msg) {
if off+2 > len(msg) {
return nsec, len(msg), &Error{err: "overflow unpacking NSEC(3)"}
}
window = int(msg[off])
length = int(msg[off+1])
off += 2
if window <= lastwindow {
// RFC 4034: Blocks are present in the NSEC RR RDATA in
// increasing numerical order.
return nsec, len(msg), &Error{err: "out of order NSEC(3) block in type bitmap"}
}
if length == 0 {
// RFC 4034: Blocks with no types present MUST NOT be included.
return nsec, len(msg), &Error{err: "empty NSEC(3) block in type bitmap"}
}
if length > 32 {
return nsec, len(msg), &Error{err: "NSEC(3) block too long in type bitmap"}
}
if off+length > len(msg) {
return nsec, len(msg), &Error{err: "overflowing NSEC(3) block in type bitmap"}
}
// Walk the bytes in the window and extract the type bits
for j, b := range msg[off : off+length] {
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0))
}
if b&0x40 == 0x40 {
nsec = append(nsec, uint16(window*256+j*8+1))
}
if b&0x20 == 0x20 {
nsec = append(nsec, uint16(window*256+j*8+2))
}
if b&0x10 == 0x10 {
nsec = append(nsec, uint16(window*256+j*8+3))
}
if b&0x8 == 0x8 {
nsec = append(nsec, uint16(window*256+j*8+4))
}
if b&0x4 == 0x4 {
nsec = append(nsec, uint16(window*256+j*8+5))
}
if b&0x2 == 0x2 {
nsec = append(nsec, uint16(window*256+j*8+6))
}
if b&0x1 == 0x1 {
nsec = append(nsec, uint16(window*256+j*8+7))
}
}
off += length
lastwindow = window
}
return nsec, off, nil
}
// typeBitMapLen is a helper function which computes the "maximum" length of
// the NSEC Type BitMap field.
func typeBitMapLen(bitmap []uint16) int {
var l int
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
l += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
// packDataNsec would return Error{err: "nsec bits out of order"} here, but
// when computing the length, we want to be liberal.
continue
}
lastwindow, lastlength = window, length
}
l += int(lastlength) + 2
return l
}
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
if len(bitmap) == 0 {
return off, nil
}
if off > len(msg) {
return off, &Error{err: "overflow packing nsec"}
}
toZero := msg[off:]
if maxLen := typeBitMapLen(bitmap); maxLen < len(toZero) {
toZero = toZero[:maxLen]
}
for i := range toZero {
toZero[i] = 0
}
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
off += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
return len(msg), &Error{err: "nsec bits out of order"}
}
if off+2+int(length) > len(msg) {
return len(msg), &Error{err: "overflow packing nsec"}
}
// Setting the window #
msg[off] = byte(window)
// Setting the octets length
msg[off+1] = byte(length)
// Setting the bit value for the type in the right octet
msg[off+1+int(length)] |= byte(1 << (7 - t%8))
lastwindow, lastlength = window, length
}
off += int(lastlength) + 2
return off, nil
}
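// Illustrative sketch, not part of the original file: round-tripping an NSEC
// type bitmap through the helpers above. exampleNsecBitmap is a hypothetical
// name; packDataNsec expects the types in ascending order.
func exampleNsecBitmap() ([]uint16, error) {
	bitmap := []uint16{TypeA, TypeMX, TypeRRSIG, TypeNSEC}
	buf := make([]byte, typeBitMapLen(bitmap))
	n, err := packDataNsec(bitmap, buf, 0)
	if err != nil {
		return nil, err
	}
	types, _, err := unpackDataNsec(buf[:n], 0)
	return types, err // the same four types come back
}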
func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) {
var xs []SVCBKeyValue
var code uint16
var length uint16
var err error
for off < len(msg) {
code, off, err = unpackUint16(msg, off)
if err != nil {
return nil, len(msg), &Error{err: "overflow unpacking SVCB"}
}
length, off, err = unpackUint16(msg, off)
if err != nil || off+int(length) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking SVCB"}
}
e := makeSVCBKeyValue(SVCBKey(code))
if e == nil {
return nil, len(msg), &Error{err: "bad SVCB key"}
}
if err := e.unpack(msg[off : off+int(length)]); err != nil {
return nil, len(msg), err
}
if len(xs) > 0 && e.Key() <= xs[len(xs)-1].Key() {
return nil, len(msg), &Error{err: "SVCB keys not in strictly increasing order"}
}
xs = append(xs, e)
off += int(length)
}
return xs, off, nil
}
func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) {
pairs = append([]SVCBKeyValue(nil), pairs...)
sort.Slice(pairs, func(i, j int) bool {
return pairs[i].Key() < pairs[j].Key()
})
prev := svcb_RESERVED
for _, el := range pairs {
if el.Key() == prev {
return len(msg), &Error{err: "repeated SVCB keys are not allowed"}
}
prev = el.Key()
packed, err := el.pack()
if err != nil {
return len(msg), err
}
off, err = packUint16(uint16(el.Key()), msg, off)
if err != nil {
return len(msg), &Error{err: "overflow packing SVCB"}
}
off, err = packUint16(uint16(len(packed)), msg, off)
if err != nil || off+len(packed) > len(msg) {
return len(msg), &Error{err: "overflow packing SVCB"}
}
copy(msg[off:off+len(packed)], packed)
off += len(packed)
}
return off, nil
}
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
var (
servers []string
s string
err error
)
if end > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
}
for off < end {
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return servers, len(msg), err
}
servers = append(servers, s)
}
return servers, off, nil
}
func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) {
var err error
for _, name := range names {
off, err = packDomainName(name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) {
var err error
for i := range data {
off, err = packDataAplPrefix(&data[i], msg, off)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) {
if len(p.Network.IP) != len(p.Network.Mask) {
return len(msg), &Error{err: "address and mask lengths don't match"}
}
var err error
prefix, _ := p.Network.Mask.Size()
addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8]
switch len(p.Network.IP) {
case net.IPv4len:
off, err = packUint16(1, msg, off)
case net.IPv6len:
off, err = packUint16(2, msg, off)
default:
err = &Error{err: "unrecognized address family"}
}
if err != nil {
return len(msg), err
}
off, err = packUint8(uint8(prefix), msg, off)
if err != nil {
return len(msg), err
}
var n uint8
if p.Negation {
n = 0x80
}
// trim trailing zero bytes as specified in RFC3123 Sections 4.1 and 4.2.
i := len(addr) - 1
for ; i >= 0 && addr[i] == 0; i-- {
}
addr = addr[:i+1]
adflen := uint8(len(addr)) & 0x7f
off, err = packUint8(n|adflen, msg, off)
if err != nil {
return len(msg), err
}
if off+len(addr) > len(msg) {
return len(msg), &Error{err: "overflow packing APL prefix"}
}
off += copy(msg[off:], addr)
return off, nil
}
func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) {
var result []APLPrefix
for off < len(msg) {
prefix, end, err := unpackDataAplPrefix(msg, off)
if err != nil {
return nil, len(msg), err
}
off = end
result = append(result, prefix)
}
return result, off, nil
}
func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
family, off, err := unpackUint16(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
prefix, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
nlen, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
var ip []byte
switch family {
case 1:
ip = make([]byte, net.IPv4len)
case 2:
ip = make([]byte, net.IPv6len)
default:
return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"}
}
if int(prefix) > 8*len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"}
}
afdlen := int(nlen & 0x7f)
if afdlen > len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL length too long"}
}
if off+afdlen > len(msg) {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"}
}
// Address MUST NOT contain trailing zero bytes per RFC3123 Sections 4.1 and 4.2.
off += copy(ip, msg[off:off+afdlen])
if afdlen > 0 {
last := ip[afdlen-1]
if last == 0 {
return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"}
}
}
ipnet := net.IPNet{
IP: ip,
Mask: net.CIDRMask(int(prefix), 8*len(ip)),
}
return APLPrefix{
Negation: (nlen & 0x80) != 0,
Network: ipnet,
}, off, nil
}
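// Illustrative sketch, not part of the original file: packing a single
// RFC 3123 APL prefix for 192.0.2.0/24 and reading it back. exampleApl is a
// hypothetical name.
func exampleApl() (APLPrefix, error) {
	_, n, err := net.ParseCIDR("192.0.2.0/24")
	if err != nil {
		return APLPrefix{}, err
	}
	p := APLPrefix{Negation: false, Network: *n}
	buf := make([]byte, 32)
	end, err := packDataAplPrefix(&p, buf, 0)
	if err != nil {
		return APLPrefix{}, err
	}
	got, _, err := unpackDataAplPrefix(buf[:end], 0)
	return got, err
}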
package dns
// Truncate ensures the reply message will fit into the requested buffer
// size by removing records that exceed the requested size.
//
// It will first check if the reply fits without compression and then with
// compression. If it won't fit with compression, Truncate then walks the
// record sections, adding as many records as possible without exceeding the
// requested buffer size.
//
// If the message fits within the requested size without compression,
// Truncate will set the message's Compress attribute to false. It is
// the caller's responsibility to set it back to true if they wish to
// compress the payload regardless of size.
//
// The TC bit will be set if any records were excluded from the message.
// If the TC bit is already set on the message it will be retained.
// TC indicates that the client should retry over TCP.
//
// According to RFC 2181, the TC bit should only be set if not all of the
// "required" RRs can be included in the response. Unfortunately, we have
// no way of knowing which RRs are required so we set the TC bit if any RR
// had to be omitted from the response.
//
// The appropriate buffer size can be retrieved from the request's OPT
// record, if present, and is transport specific otherwise. dns.MinMsgSize
// should be used for UDP requests without an OPT record, and
// dns.MaxMsgSize for TCP requests without an OPT record.
func (dns *Msg) Truncate(size int) {
if dns.IsTsig() != nil {
// To simplify this implementation, we don't perform
// truncation on responses with a TSIG record.
return
}
// RFC 6891 mandates that the payload size in an OPT record
// less than 512 (MinMsgSize) bytes must be treated as equal to 512 bytes.
//
// For ease of use, we impose that restriction here.
if size < MinMsgSize {
size = MinMsgSize
}
l := msgLenWithCompressionMap(dns, nil) // uncompressed length
if l <= size {
// Don't waste effort compressing this message.
dns.Compress = false
return
}
dns.Compress = true
edns0 := dns.popEdns0()
if edns0 != nil {
// Account for the OPT record that gets added at the end,
// by subtracting that length from our budget.
//
// The EDNS(0) OPT record must have the root domain and
// its length is thus unaffected by compression.
size -= Len(edns0)
}
compression := make(map[string]struct{})
l = headerSize
for _, r := range dns.Question {
l += r.len(l, compression)
}
var numAnswer int
if l < size {
l, numAnswer = truncateLoop(dns.Answer, size, l, compression)
}
var numNS int
if l < size {
l, numNS = truncateLoop(dns.Ns, size, l, compression)
}
var numExtra int
if l < size {
_, numExtra = truncateLoop(dns.Extra, size, l, compression)
}
// See the function documentation for when we set this.
dns.Truncated = dns.Truncated || len(dns.Answer) > numAnswer ||
len(dns.Ns) > numNS || len(dns.Extra) > numExtra
dns.Answer = dns.Answer[:numAnswer]
dns.Ns = dns.Ns[:numNS]
dns.Extra = dns.Extra[:numExtra]
if edns0 != nil {
// Add the OPT record back onto the additional section.
dns.Extra = append(dns.Extra, edns0)
}
}
func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) {
for i, r := range rrs {
if r == nil {
continue
}
l += r.len(l, compression)
if l > size {
// Return size, rather than l prior to this record,
// to prevent any further records being added.
return size, i
}
if l == size {
return l, i + 1
}
}
return l, len(rrs)
}
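// Illustrative sketch, not part of the original file: typical use of Truncate
// when answering a UDP query, sizing the reply from the request's OPT record
// if one is present. exampleTruncate is a hypothetical name.
func exampleTruncate(reply, req *Msg) {
	size := MinMsgSize // 512, the UDP default without EDNS(0)
	if opt := req.IsEdns0(); opt != nil {
		size = int(opt.UDPSize())
	}
	reply.Truncate(size)
}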
......@@ -2,110 +2,93 @@ package dns
import (
"crypto/sha1"
"hash"
"io"
"encoding/hex"
"strings"
)
type saltWireFmt struct {
Salt string `dns:"size-hex"`
}
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in
// uppercase.
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
func HashName(label string, ha uint8, iter uint16, salt string) string {
saltwire := new(saltWireFmt)
saltwire.Salt = salt
wire := make([]byte, DefaultMsgSize)
n, err := PackStruct(saltwire, wire, 0)
if ha != SHA1 {
return ""
}
wireSalt := make([]byte, hex.DecodedLen(len(salt)))
n, err := packStringHex(salt, wireSalt, 0)
if err != nil {
return ""
}
wire = wire[:n]
wireSalt = wireSalt[:n]
name := make([]byte, 255)
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
if err != nil {
return ""
}
name = name[:off]
var s hash.Hash
switch ha {
case SHA1:
s = sha1.New()
default:
return ""
}
s := sha1.New()
// k = 0
name = append(name, wire...)
io.WriteString(s, string(name))
s.Write(name)
s.Write(wireSalt)
nsec3 := s.Sum(nil)
// k > 0
for k := uint16(0); k < iter; k++ {
s.Reset()
nsec3 = append(nsec3, wire...)
io.WriteString(s, string(nsec3))
nsec3 = s.Sum(nil)
s.Write(nsec3)
s.Write(wireSalt)
nsec3 = s.Sum(nsec3[:0])
}
return toBase32(nsec3)
}
// Denialer is an interface that should be implemented by types that are used to deny
// answers in DNSSEC.
type Denialer interface {
// Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
Cover(name string) bool
// Match will check if the ownername matches the (unhashed) name for this NSEC or NSEC3.
Match(name string) bool
}
// Cover implements the Denialer interface.
func (rr *NSEC) Cover(name string) bool {
return true
}
// Match implements the Denialer interface.
func (rr *NSEC) Match(name string) bool {
return true
return toBase32(nsec3)
}
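// Illustrative sketch, not part of the original file: hashing an owner name
// the way an NSEC3 record does, using that record's own parameters. The
// parameter values below are made up.
func exampleHashName() string {
	// SHA1, 10 extra iterations, 4-octet salt AABBCCDD.
	return HashName("example.org.", SHA1, 10, "AABBCCDD")
}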
// Cover implements the Denialer interface.
// Cover returns true if a name is covered by the NSEC3 record.
func (rr *NSEC3) Cover(name string) bool {
// FIXME(miek): check if the zones match
// FIXME(miek): check if we're not dealing with parent nsec3
hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
labels := Split(rr.Hdr.Name)
if len(labels) < 2 {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot
if hash == rr.NextDomain {
return false // empty interval
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
if hash > rr.NextDomain { // last name, points to apex
// hname > hash
// hname > rr.NextDomain
// TODO(miek)
nextHash := rr.NextDomain
// if empty interval found, try cover wildcard hashes so nameHash shouldn't match with ownerHash
if ownerHash == nextHash && nameHash != ownerHash { // empty interval
return true
}
if hname <= hash {
return false
if ownerHash > nextHash { // end of zone
if nameHash > ownerHash { // covered since there is nothing after ownerHash
return true
}
return nameHash < nextHash // if nameHash is before beginning of zone it is covered
}
if hname >= rr.NextDomain {
if nameHash < ownerHash { // nameHash is before ownerHash, not covered
return false
}
return true
return nameHash < nextHash // if nameHash is before nextHash it is covered (between ownerHash and nextHash)
}
// Match implements the Denialer interface.
// Match returns true if a name matches the NSEC3 record
func (rr *NSEC3) Match(name string) bool {
// FIXME(miek): Check if we are in the same zone
hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
labels := Split(rr.Hdr.Name)
if len(labels) < 2 {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the .
if hash == hname {
if ownerHash == nameHash {
return true
}
return false
......
package dns
import (
"fmt"
"strings"
)
import "strings"
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
// RFC 6895. This allows one to experiment with new RR types, without requesting an
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
type PrivateRdata interface {
// String returns the text presentaton of the Rdata of the Private RR.
// String returns the text presentation of the Rdata of the Private RR.
String() string
// Parse parses the Rdata of the private RR.
Parse([]string) error
// Pack is used when packing a private RR into a buffer.
Pack([]byte) (int, error)
// Unpack is used when unpacking a private RR from a buffer.
// TODO(miek): diff. signature than Pack, see edns0.go for instance.
Unpack([]byte) (int, error)
// Copy copies the Rdata.
// Copy copies the Rdata into the PrivateRdata argument.
Copy(PrivateRdata) error
// Len returns the length in octets of the Rdata.
Len() int
......@@ -29,21 +25,8 @@ type PrivateRdata interface {
type PrivateRR struct {
Hdr RR_Header
Data PrivateRdata
}
func mkPrivateRR(rrtype uint16) *PrivateRR {
// Panics if RR is not an instance of PrivateRR.
rrfunc, ok := typeToRR[rrtype]
if !ok {
panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
}
anyrr := rrfunc()
switch rr := anyrr.(type) {
case *PrivateRR:
return rr
}
panic(fmt.Sprintf("dns: RR is not a PrivateRR, typeToRR[%d] generator returned %T", rrtype, anyrr))
generator func() PrivateRdata // for copy
}
// Header return the RR header of r.
......@@ -52,66 +35,79 @@ func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
// Private len and copy parts to satisfy RR interface.
func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
l := r.Hdr.len(off, compression)
l += r.Data.Len()
return l
}
func (r *PrivateRR) copy() RR {
// make new RR like this:
rr := mkPrivateRR(r.Hdr.Rrtype)
newh := r.Hdr.copyHeader()
rr.Hdr = *newh
rr := &PrivateRR{r.Hdr, r.generator(), r.generator}
err := r.Data.Copy(rr.Data)
if err != nil {
panic("dns: got value that could not be used to copy Private rdata")
if err := r.Data.Copy(rr.Data); err != nil {
panic("dns: got value that could not be used to copy Private rdata: " + err.Error())
}
return rr
}
func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
n, err := r.Data.Pack(msg[off:])
if err != nil {
return len(msg), err
}
off += n
return off, nil
}
func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
off1, err := r.Data.Unpack(msg[off:])
off += off1
return off, err
}
func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError {
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l, _ = c.Next(); l.value {
case zNewline, zEOF:
break Fetch
case zString:
text = append(text, l.token)
}
}
err := r.Data.Parse(text)
if err != nil {
return &ParseError{"", err.Error(), l}
}
return nil
}
func (r *PrivateRR) isDuplicate(r2 RR) bool { return false }
// PrivateHandle registers a private resource record type. It requires
// string and numeric representation of private RR type and generator function as argument.
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
rtypestr = strings.ToUpper(rtypestr)
typeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} }
TypeToString[rtype] = rtypestr
StringToType[rtypestr] = rtype
setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := mkPrivateRR(h.Rrtype)
rr.Hdr = h
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
FETCH:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l = <-c; l.value {
case zNewline, zEOF:
break FETCH
case zString:
text = append(text, l.token)
}
}
err := rr.Data.Parse(text)
if err != nil {
return nil, &ParseError{f, err.Error(), l}, ""
}
return rr, nil, ""
}
typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
}
// PrivateHandleRemove removes defenitions required to support private RR type.
// PrivateHandleRemove removes definitions required to support private RR type.
func PrivateHandleRemove(rtype uint16) {
rtypestr, ok := TypeToString[rtype]
if ok {
delete(typeToRR, rtype)
delete(TypeToRR, rtype)
delete(TypeToString, rtype)
delete(typeToparserFunc, rtype)
delete(StringToType, rtypestr)
}
return
}
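// Illustrative sketch, not part of the original file: registering a toy
// private RR type with PrivateHandle. The mnemonic "EXAMPLE", the type code
// 65300 (from the RFC 6895 private-use range) and the rdata layout are all
// made up.
type exampleRdata struct{ Data string }

func (e *exampleRdata) String() string { return e.Data }
func (e *exampleRdata) Parse(txt []string) error {
	e.Data = strings.Join(txt, " ")
	return nil
}
func (e *exampleRdata) Pack(buf []byte) (int, error) {
	if len(buf) < len(e.Data) {
		return 0, ErrBuf
	}
	return copy(buf, e.Data), nil
}
func (e *exampleRdata) Unpack(buf []byte) (int, error) {
	e.Data = string(buf)
	return len(buf), nil
}
func (e *exampleRdata) Copy(dest PrivateRdata) error {
	d, ok := dest.(*exampleRdata)
	if !ok {
		return ErrRdata
	}
	d.Data = e.Data
	return nil
}
func (e *exampleRdata) Len() int { return len(e.Data) }

func examplePrivateHandle() {
	PrivateHandle("EXAMPLE", 65300, func() PrivateRdata { return new(exampleRdata) })
	defer PrivateHandleRemove(65300)
	// NewRR("example.org. 3600 IN EXAMPLE some text") would now parse.
}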
package dns
// These raw* functions do not use reflection, they directly set the values
// in the buffer. There are faster than their reflection counterparts.
// RawSetId sets the message id in buf.
func rawSetId(msg []byte, i uint16) bool {
if len(msg) < 2 {
return false
}
msg[0], msg[1] = packUint16(i)
return true
}
// rawSetQuestionLen sets the length of the question section.
func rawSetQuestionLen(msg []byte, i uint16) bool {
if len(msg) < 6 {
return false
}
msg[4], msg[5] = packUint16(i)
return true
}
// rawSetAnswerLen sets the length of the answer section.
func rawSetAnswerLen(msg []byte, i uint16) bool {
if len(msg) < 8 {
return false
}
msg[6], msg[7] = packUint16(i)
return true
}
// rawSetNsLen sets the length of the authority section.
func rawSetNsLen(msg []byte, i uint16) bool {
if len(msg) < 10 {
return false
}
msg[8], msg[9] = packUint16(i)
return true
}
// rawSetExtraLen sets the length of the additional section.
func rawSetExtraLen(msg []byte, i uint16) bool {
if len(msg) < 12 {
return false
}
msg[10], msg[11] = packUint16(i)
return true
}
// rawSetRdlength sets the rdlength in the header of
// the RR. The offset 'off' must be positioned at the
// start of the header of the RR, 'end' must be the
// end of the RR.
func rawSetRdlength(msg []byte, off, end int) bool {
l := len(msg)
Loop:
for {
if off+1 > l {
return false
}
c := int(msg[off])
off++
switch c & 0xC0 {
case 0x00:
if c == 0x00 {
// End of the domainname
break Loop
}
if off+c > l {
return false
}
off += c
case 0xC0:
// pointer, next byte included, ends domainname
off++
break Loop
}
}
// The domainname has been seen, we are at the start of the fixed part in the header.
// Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length.
off += 2 + 2 + 4
if off+2 > l {
return false
}
// off+1 is the last byte of the RR header and 'end' is the end of the RR,
// so 'end' - ('off'+2) is the length of the rdata
rdatalen := end - (off + 2)
if rdatalen > 0xFFFF {
return false
}
msg[off], msg[off+1] = packUint16(uint16(rdatalen))
return true
}
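// Worked example (illustrative): for the uncompressed owner name "a.example."
// the wire form is \x01 a \x07 e x a m p l e \x00 (11 bytes). The label walk
// above leaves 'off' just past the terminating zero byte; adding 2+2+4 skips
// TYPE, CLASS and TTL, so msg[off] and msg[off+1] hold the RDLENGTH and the
// rdata itself starts at off+2, hence rdatalen = end - (off + 2).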
package dns
// StringToType is the reverse of TypeToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
// StringToOpcode is a map of strings to opcodes.
var StringToOpcode = reverseInt(OpcodeToString)
// StringToRcode is a map of strings to rcodes.
var StringToRcode = reverseInt(RcodeToString)
func init() {
// Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733.
StringToRcode["NOTIMPL"] = RcodeNotImplemented
}
// StringToAlgorithm is the reverse of AlgorithmToString.
var StringToAlgorithm = reverseInt8(AlgorithmToString)
// StringToHash is a map of names to hash IDs.
var StringToHash = reverseInt8(HashToString)
// StringToCertType is the reverse of CertTypeToString.
var StringToCertType = reverseInt16(CertTypeToString)
// Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 {
n := make(map[string]uint8, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt16(m map[uint16]string) map[string]uint16 {
n := make(map[string]uint16, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt(m map[int]string) map[string]int {
n := make(map[string]int, len(m))
for u, s := range m {
n[s] = u
}
return n
}
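// A few example lookups (illustrative):
//
//	StringToType["MX"]       // TypeMX (15)
//	StringToClass["IN"]      // ClassINET (1)
//	StringToRcode["NOTIMPL"] // RcodeNotImplemented, kept for the historical typo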
......@@ -3,8 +3,9 @@ package dns
// Dedup removes identical RRs from rrs. It preserves the original ordering.
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
// rrs.
// m is used to store the RRs temporarily. If it is nil a new map will be allocated.
func Dedup(rrs []RR, m map[string]RR) []RR {
if m == nil {
m = make(map[string]RR)
}
......@@ -14,10 +15,11 @@ func Dedup(rrs []RR, m map[string]RR) []RR {
for _, r := range rrs {
key := normalizedString(r)
keys = append(keys, &key)
if _, ok := m[key]; ok {
if mr, ok := m[key]; ok {
// Shortest TTL wins.
if m[key].Header().Ttl > r.Header().Ttl {
m[key].Header().Ttl = r.Header().Ttl
rh, mrh := r.Header(), mr.Header()
if mrh.Ttl > rh.Ttl {
mrh.Ttl = rh.Ttl
}
continue
}
......
package dns
import (
"bufio"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strconv"
"strings"
)
type debugging bool
const debug debugging = false
func (d debugging) Printf(format string, args ...interface{}) {
if d {
log.Printf(format, args...)
}
}
const maxTok = 2048 // Largest token we can return.
const maxUint16 = 1<<16 - 1
// The maximum depth of $INCLUDE directives supported by the
// ZoneParser API.
const maxIncludeDepth = 7
// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
......@@ -38,7 +33,7 @@ const (
zOwner
zClass
zDirOrigin // $ORIGIN
zDirTtl // $TTL
zDirTTL // $TTL
zDirInclude // $INCLUDE
zDirGenerate // $GENERATE
......@@ -51,13 +46,13 @@ const (
zExpectAny // Expect rrtype, ttl or class
zExpectAnyNoClass // Expect rrtype or ttl
zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS
zExpectAnyNoTtl // Expect rrtype or class
zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL
zExpectAnyNoTTL // Expect rrtype or class
zExpectAnyNoTTLBl // Whitespace after _EXPECT_ANY_NOTTL
zExpectRrtype // Expect rrtype
zExpectRrtypeBl // Whitespace BEFORE rrtype
zExpectRdata // The first element of the rdata
zExpectDirTtlBl // Space after directive $TTL
zExpectDirTtl // Directive $TTL
zExpectDirTTLBl // Space after directive $TTL
zExpectDirTTL // Directive $TTL
zExpectDirOriginBl // Space after directive $ORIGIN
zExpectDirOrigin // Directive $ORIGIN
zExpectDirIncludeBl // Space after directive $INCLUDE
......@@ -67,7 +62,7 @@ const (
)
// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
// where the error occurred.
type ParseError struct {
file string
err string
......@@ -84,32 +79,26 @@ func (e *ParseError) Error() (s string) {
}
type lex struct {
token string // text of the token
tokenUpper string // uppercase text of the token
length int // length of the token
err bool // when true, token text has lexer error
value uint8 // value: zString, _BLANK, etc.
line int // line in the file
column int // column in the file
torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
comment string // any comment text seen
}
// Token holds the tokens that are returned when a zone file is parsed.
type Token struct {
// The scanned resource record when error is not nil.
RR
// When an error occurred, this has the error specifics.
Error *ParseError
// A potential comment positioned after the RR and on the same line.
Comment string
}
// NewRR reads the RR contained in the string s. Only the first RR is
// returned. If s contains no RR, return nil with no error. The class
// defaults to IN and TTL defaults to 3600. The full zone file syntax
// like $TTL, $ORIGIN, etc. is supported. All fields of the returned
// RR are set, except RR.Header().Rdlength which is set to 0.
token string // text of the token
err bool // when true, token text has lexer error
value uint8 // value: zString, _BLANK, etc.
torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
line int // line in the file
column int // column in the file
}
// ttlState describes the state necessary to fill in an omitted RR TTL
type ttlState struct {
ttl uint32 // ttl is the current default TTL
isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive
}
// NewRR reads the RR contained in the string s. Only the first RR is returned.
// If s contains no records, NewRR will return nil with no error.
//
// The class defaults to IN and TTL defaults to 3600. The full zone file syntax
// like $TTL, $ORIGIN, etc. is supported. All fields of the returned RR are
// set, except RR.Header().Rdlength which is set to 0.
func NewRR(s string) (RR, error) {
if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
return ReadRR(strings.NewReader(s+"\n"), "")
......@@ -117,121 +106,231 @@ func NewRR(s string) (RR, error) {
return ReadRR(strings.NewReader(s), "")
}
// ReadRR reads the RR contained in r.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives.
//
// See NewRR for more documentation.
func ReadRR(q io.Reader, filename string) (RR, error) {
r := <-parseZoneHelper(q, ".", filename, 1)
if r == nil {
return nil, nil
}
if r.Error != nil {
return nil, r.Error
}
return r.RR, nil
func ReadRR(r io.Reader, file string) (RR, error) {
zp := NewZoneParser(r, ".", file)
zp.SetDefaultTTL(defaultTtl)
zp.SetIncludeAllowed(true)
rr, _ := zp.Next()
return rr, zp.Err()
}
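// Minimal usage sketch (the record is illustrative):
//
//	rr, err := dns.NewRR("www.example.org. 300 IN A 192.0.2.1")
//	if err != nil { /* handle the parse error */ }
//	if rr != nil { /* rr is a *dns.A with all fields set except Rdlength */ }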
// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the
// returned channel, which consist out the parsed RR, a potential comment or an error.
// If there is an error the RR is nil. The string file is only used
// in error reporting. The string origin is used as the initial origin, as
// if the file would start with: $ORIGIN origin .
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
// The channel t is closed by ParseZone when the end of r is reached.
// ZoneParser is a parser for an RFC 1035 style zonefile.
//
// Each parsed RR in the zone is returned sequentially from Next. An
// optional comment can be retrieved with Comment.
//
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
// supported, although $INCLUDE is disabled by default.
// Note that $GENERATE's range supports up to a maximum of 65535 steps.
//
// Basic usage pattern when reading from a string (z) containing the
// zone data:
//
// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
// if x.Error != nil {
// // Do something with x.RR
// }
// zp := NewZoneParser(strings.NewReader(z), "", "")
//
// for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
// // Do something with rr
// }
//
// if err := zp.Err(); err != nil {
// // log.Println(err)
// }
//
// Comments specified after an RR (and on the same line!) are
// returned too:
//
// foo. IN A 10.0.0.1 ; this is a comment
//
// The text "; this is comment" is returned in Token.Comment. Comments inside the
// RR are discarded. Comments on a line by themselves are discarded too.
func ParseZone(r io.Reader, origin, file string) chan *Token {
return parseZoneHelper(r, origin, file, 10000)
// The text "; this is comment" is returned from Comment. Comments inside
// the RR are returned concatenated along with the RR. Comments on a line
// by themselves are discarded.
//
// Callers should not assume all returned data in a Resource Record is
// syntactically correct, e.g. illegal base64 in RRSIGs will be returned as-is.
type ZoneParser struct {
c *zlexer
parseErr *ParseError
origin string
file string
defttl *ttlState
h RR_Header
// sub is used to parse $INCLUDE files and $GENERATE directives.
// Next, by calling subNext, forwards the resulting RRs from this
// sub parser to the calling code.
sub *ZoneParser
osFile *os.File
includeDepth uint8
includeAllowed bool
generateDisallowed bool
}
// NewZoneParser returns an RFC 1035 style zonefile parser that reads
// from r.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives. The string origin is used as the initial
// origin, as if the file would start with an $ORIGIN directive.
func NewZoneParser(r io.Reader, origin, file string) *ZoneParser {
var pe *ParseError
if origin != "" {
origin = Fqdn(origin)
if _, ok := IsDomainName(origin); !ok {
pe = &ParseError{file, "bad initial origin name", lex{}}
}
}
return &ZoneParser{
c: newZLexer(r),
parseErr: pe,
origin: origin,
file: file,
}
}
func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token {
t := make(chan *Token, chansize)
go parseZone(r, origin, file, t, 0)
return t
// SetDefaultTTL sets the parsers default TTL to ttl.
func (zp *ZoneParser) SetDefaultTTL(ttl uint32) {
zp.defttl = &ttlState{ttl, false}
}
func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
defer func() {
if include == 0 {
close(t)
// SetIncludeAllowed controls whether $INCLUDE directives are
// allowed. $INCLUDE directives are not supported by default.
//
// The $INCLUDE directive will open and read from a user controlled
// file on the system. Even if the file is not a valid zonefile, the
// contents of the file may be revealed in error messages, such as:
//
// /etc/passwd: dns: not a TTL: "root:x:0:0:root:/root:/bin/bash" at line: 1:31
// /etc/shadow: dns: not a TTL: "root:$6$<redacted>::0:99999:7:::" at line: 1:125
func (zp *ZoneParser) SetIncludeAllowed(v bool) {
zp.includeAllowed = v
}
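// Typical use (sketch; the path and origin are illustrative): only enable
// $INCLUDE for zone files that come from a trusted source.
//
//	f, err := os.Open("/etc/zones/db.example.org")
//	if err != nil { /* handle the error */ }
//	zp := dns.NewZoneParser(f, "example.org.", f.Name())
//	zp.SetIncludeAllowed(true)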
// Err returns the first non-EOF error that was encountered by the
// ZoneParser.
func (zp *ZoneParser) Err() error {
if zp.parseErr != nil {
return zp.parseErr
}
if zp.sub != nil {
if err := zp.sub.Err(); err != nil {
return err
}
}()
s := scanInit(r)
c := make(chan lex)
// Start the lexer
go zlexer(s, c)
// 6 possible beginnings of a line, _ is a space
// 0. zRRTYPE -> all omitted until the rrtype
// 1. zOwner _ zRrtype -> class/ttl omitted
// 2. zOwner _ zString _ zRrtype -> class omitted
// 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
// 4. zOwner _ zClass _ zRrtype -> ttl omitted
// 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
// After detecting these, we know the zRrtype so we can jump to functions
// handling the rdata for each of these types.
}
if origin == "" {
origin = "."
return zp.c.Err()
}
func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) {
zp.parseErr = &ParseError{zp.file, err, l}
return nil, false
}
// Comment returns an optional text comment that occurred alongside
// the RR.
func (zp *ZoneParser) Comment() string {
if zp.parseErr != nil {
return ""
}
if zp.sub != nil {
return zp.sub.Comment()
}
return zp.c.Comment()
}
func (zp *ZoneParser) subNext() (RR, bool) {
if rr, ok := zp.sub.Next(); ok {
return rr, true
}
if zp.sub.osFile != nil {
zp.sub.osFile.Close()
zp.sub.osFile = nil
}
if zp.sub.Err() != nil {
// We have errors to surface.
return nil, false
}
zp.sub = nil
return zp.Next()
}
// Next advances the parser to the next RR in the zonefile and
// returns the (RR, true). It will return (nil, false) when the
// parsing stops, either by reaching the end of the input or an
// error. After Next returns (nil, false), the Err method will return
// any error that occurred during parsing.
func (zp *ZoneParser) Next() (RR, bool) {
if zp.parseErr != nil {
return nil, false
}
origin = Fqdn(origin)
if _, ok := IsDomainName(origin); !ok {
t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
return
if zp.sub != nil {
return zp.subNext()
}
// 6 possible beginnings of a line (_ is a space):
//
// 0. zRRTYPE -> all omitted until the rrtype
// 1. zOwner _ zRrtype -> class/ttl omitted
// 2. zOwner _ zString _ zRrtype -> class omitted
// 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
// 4. zOwner _ zClass _ zRrtype -> ttl omitted
// 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
//
// After detecting these, we know the zRrtype so we can jump to functions
// handling the rdata for each of these types.
st := zExpectOwnerDir // initial state
var h RR_Header
var defttl uint32 = defaultTtl
var prevName string
for l := range c {
// Lexer spotted an error already
if l.err == true {
t <- &Token{Error: &ParseError{f, l.token, l}}
return
h := &zp.h
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
// zlexer spotted an error already
if l.err {
return zp.setParseError(l.token, l)
}
switch st {
case zExpectOwnerDir:
// We can also expect a directive, like $TTL or $ORIGIN
h.Ttl = defttl
if zp.defttl != nil {
h.Ttl = zp.defttl.ttl
}
h.Class = ClassINET
switch l.value {
case zNewline:
st = zExpectOwnerDir
case zOwner:
h.Name = l.token
if l.token[0] == '@' {
h.Name = origin
prevName = h.Name
st = zExpectOwnerBl
break
}
if h.Name[l.length-1] != '.' {
h.Name = appendOrigin(h.Name, origin)
}
_, ok := IsDomainName(l.token)
name, ok := toAbsoluteName(l.token, zp.origin)
if !ok {
t <- &Token{Error: &ParseError{f, "bad owner name", l}}
return
return zp.setParseError("bad owner name", l)
}
prevName = h.Name
h.Name = name
st = zExpectOwnerBl
case zDirTtl:
st = zExpectDirTtlBl
case zDirTTL:
st = zExpectDirTTLBl
case zDirOrigin:
st = zExpectDirOriginBl
case zDirInclude:
......@@ -239,313 +338,492 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
case zDirGenerate:
st = zExpectDirGenerateBl
case zRrtpe:
h.Name = prevName
h.Rrtype = l.torc
st = zExpectRdata
case zClass:
h.Name = prevName
h.Class = l.torc
st = zExpectAnyNoClassBl
case zBlank:
// Discard, can happen when there is nothing on the
// line except the RR type
case zString:
ttl, ok := stringToTtl(l.token)
ttl, ok := stringToTTL(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
return
return zp.setParseError("not a TTL", l)
}
h.Ttl = ttl
// Don't care about the defttl, we should take the $TTL value
// defttl = ttl
st = zExpectAnyNoTtlBl
if zp.defttl == nil || !zp.defttl.isByDirective {
zp.defttl = &ttlState{ttl, false}
}
st = zExpectAnyNoTTLBl
default:
t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
return
return zp.setParseError("syntax error at beginning", l)
}
case zExpectDirIncludeBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
return
return zp.setParseError("no blank after $INCLUDE-directive", l)
}
st = zExpectDirInclude
case zExpectDirInclude:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
return
return zp.setParseError("expecting $INCLUDE value, not this...", l)
}
neworigin := origin // There may be optionally a new origin set after the filename, if not use current one
l := <-c
switch l.value {
neworigin := zp.origin // There may be optionally a new origin set after the filename, if not use current one
switch l, _ := zp.c.Next(); l.value {
case zBlank:
l := <-c
l, _ := zp.c.Next()
if l.value == zString {
if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err {
t <- &Token{Error: &ParseError{f, "bad origin name", l}}
return
}
// a new origin is specified.
if l.token[l.length-1] != '.' {
if origin != "." { // Prevent .. endings
neworigin = l.token + "." + origin
} else {
neworigin = l.token + origin
}
} else {
neworigin = l.token
name, ok := toAbsoluteName(l.token, zp.origin)
if !ok {
return zp.setParseError("bad origin name", l)
}
neworigin = name
}
case zNewline, zEOF:
// Ok
default:
t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}}
return
return zp.setParseError("garbage after $INCLUDE", l)
}
if !zp.includeAllowed {
return zp.setParseError("$INCLUDE directive not allowed", l)
}
if zp.includeDepth >= maxIncludeDepth {
return zp.setParseError("too deeply nested $INCLUDE", l)
}
// Start with the new file
r1, e1 := os.Open(l.token)
if e1 != nil {
t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}}
return
includePath := l.token
if !filepath.IsAbs(includePath) {
includePath = filepath.Join(filepath.Dir(zp.file), includePath)
}
if include+1 > 7 {
t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
return
r1, e1 := os.Open(includePath)
if e1 != nil {
var as string
if !filepath.IsAbs(l.token) {
as = fmt.Sprintf(" as `%s'", includePath)
}
msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1)
return zp.setParseError(msg, l)
}
parseZone(r1, l.token, neworigin, t, include+1)
st = zExpectOwnerDir
case zExpectDirTtlBl:
zp.sub = NewZoneParser(r1, neworigin, includePath)
zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1
zp.sub.SetIncludeAllowed(true)
return zp.subNext()
case zExpectDirTTLBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
return
return zp.setParseError("no blank after $TTL-directive", l)
}
st = zExpectDirTtl
case zExpectDirTtl:
st = zExpectDirTTL
case zExpectDirTTL:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
return
return zp.setParseError("expecting $TTL value, not this...", l)
}
if e, _ := slurpRemainder(c, f); e != nil {
t <- &Token{Error: e}
return
if err := slurpRemainder(zp.c); err != nil {
return zp.setParseError(err.err, err.lex)
}
ttl, ok := stringToTtl(l.token)
ttl, ok := stringToTTL(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
return
return zp.setParseError("expecting $TTL value, not this...", l)
}
defttl = ttl
zp.defttl = &ttlState{ttl, true}
st = zExpectOwnerDir
case zExpectDirOriginBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
return
return zp.setParseError("no blank after $ORIGIN-directive", l)
}
st = zExpectDirOrigin
case zExpectDirOrigin:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
return
}
if e, _ := slurpRemainder(c, f); e != nil {
t <- &Token{Error: e}
return zp.setParseError("expecting $ORIGIN value, not this...", l)
}
if _, ok := IsDomainName(l.token); !ok {
t <- &Token{Error: &ParseError{f, "bad origin name", l}}
return
if err := slurpRemainder(zp.c); err != nil {
return zp.setParseError(err.err, err.lex)
}
if l.token[l.length-1] != '.' {
if origin != "." { // Prevent .. endings
origin = l.token + "." + origin
} else {
origin = l.token + origin
}
} else {
origin = l.token
name, ok := toAbsoluteName(l.token, zp.origin)
if !ok {
return zp.setParseError("bad origin name", l)
}
zp.origin = name
st = zExpectOwnerDir
case zExpectDirGenerateBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}}
return
return zp.setParseError("no blank after $GENERATE-directive", l)
}
st = zExpectDirGenerate
case zExpectDirGenerate:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
return
if zp.generateDisallowed {
return zp.setParseError("nested $GENERATE directive not allowed", l)
}
if e := generate(l, c, t, origin); e != "" {
t <- &Token{Error: &ParseError{f, e, l}}
return
if l.value != zString {
return zp.setParseError("expecting $GENERATE value, not this...", l)
}
st = zExpectOwnerDir
return zp.generate(l)
case zExpectOwnerBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after owner", l}}
return
return zp.setParseError("no blank after owner", l)
}
st = zExpectAny
case zExpectAny:
switch l.value {
case zRrtpe:
if zp.defttl == nil {
return zp.setParseError("missing TTL with no previous value", l)
}
h.Rrtype = l.torc
st = zExpectRdata
case zClass:
h.Class = l.torc
st = zExpectAnyNoClassBl
case zString:
ttl, ok := stringToTtl(l.token)
ttl, ok := stringToTTL(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
return
return zp.setParseError("not a TTL", l)
}
h.Ttl = ttl
// defttl = ttl // don't set the defttl here
st = zExpectAnyNoTtlBl
if zp.defttl == nil || !zp.defttl.isByDirective {
zp.defttl = &ttlState{ttl, false}
}
st = zExpectAnyNoTTLBl
default:
t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
return
return zp.setParseError("expecting RR type, TTL or class, not this...", l)
}
case zExpectAnyNoClassBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank before class", l}}
return
return zp.setParseError("no blank before class", l)
}
st = zExpectAnyNoClass
case zExpectAnyNoTtlBl:
case zExpectAnyNoTTLBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
return
return zp.setParseError("no blank before TTL", l)
}
st = zExpectAnyNoTtl
case zExpectAnyNoTtl:
st = zExpectAnyNoTTL
case zExpectAnyNoTTL:
switch l.value {
case zClass:
h.Class = l.torc
st = zExpectRrtypeBl
case zRrtpe:
h.Rrtype = l.torc
st = zExpectRdata
default:
t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
return
return zp.setParseError("expecting RR type or class, not this...", l)
}
case zExpectAnyNoClass:
switch l.value {
case zString:
ttl, ok := stringToTtl(l.token)
ttl, ok := stringToTTL(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
return
return zp.setParseError("not a TTL", l)
}
h.Ttl = ttl
// defttl = ttl // don't set the def ttl anymore
if zp.defttl == nil || !zp.defttl.isByDirective {
zp.defttl = &ttlState{ttl, false}
}
st = zExpectRrtypeBl
case zRrtpe:
h.Rrtype = l.torc
st = zExpectRdata
default:
t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
return
return zp.setParseError("expecting RR type or TTL, not this...", l)
}
case zExpectRrtypeBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank before RR type", l}}
return
return zp.setParseError("no blank before RR type", l)
}
st = zExpectRrtype
case zExpectRrtype:
if l.value != zRrtpe {
t <- &Token{Error: &ParseError{f, "unknown RR type", l}}
return
return zp.setParseError("unknown RR type", l)
}
h.Rrtype = l.torc
st = zExpectRdata
case zExpectRdata:
r, e, c1 := setRR(h, c, origin, f)
if e != nil {
// If e.lex is nil then we have encountered an unknown RR type
// in that case we substitute our current lex token
if e.lex.token == "" && e.lex.value == 0 {
e.lex = l // Uh, dirty
var (
rr RR
parseAsRFC3597 bool
)
if newFn, ok := TypeToRR[h.Rrtype]; ok {
rr = newFn()
*rr.Header() = *h
// We may be parsing a known RR type using the RFC3597 format.
// If so, we handle that here in a generic way.
//
// This is also true for PrivateRR types which will have the
// RFC3597 parsing done for them and the Unpack method called
// to populate the RR instead of simply deferring to Parse.
if zp.c.Peek().token == "\\#" {
parseAsRFC3597 = true
}
} else {
rr = &RFC3597{Hdr: *h}
}
_, isPrivate := rr.(*PrivateRR)
if !isPrivate && zp.c.Peek().token == "" {
// This is a dynamic update rr.
// TODO(tmthrgd): Previously slurpRemainder was only called
// for certain RR types, which may have been important.
if err := slurpRemainder(zp.c); err != nil {
return zp.setParseError(err.err, err.lex)
}
t <- &Token{Error: e}
return
return rr, true
} else if l.value == zNewline {
return zp.setParseError("unexpected newline", l)
}
t <- &Token{RR: r, Comment: c1}
st = zExpectOwnerDir
parseAsRR := rr
if parseAsRFC3597 {
parseAsRR = &RFC3597{Hdr: *h}
}
if err := parseAsRR.parse(zp.c, zp.origin); err != nil {
// err is a concrete *ParseError without the file field set.
// The setParseError call below will construct a new
// *ParseError with file set to zp.file.
// err.lex may be nil in which case we substitute our current
// lex token.
if err.lex == (lex{}) {
return zp.setParseError(err.err, l)
}
return zp.setParseError(err.err, err.lex)
}
if parseAsRFC3597 {
err := parseAsRR.(*RFC3597).fromRFC3597(rr)
if err != nil {
return zp.setParseError(err.Error(), l)
}
}
return rr, true
}
}
// If we get here and h.Rrtype is still zero, we haven't parsed anything. This
// is not an error, because an empty zone file is still a zone file.
return nil, false
}
// zlexer scans the source file and returns tokens on the channel c.
func zlexer(s *scan, c chan lex) {
var l lex
str := make([]byte, maxTok) // Should be enough for any token
stri := 0 // Offset in str (0 means empty)
com := make([]byte, maxTok) // Hold comment text
comi := 0
quote := false
escape := false
space := false
commt := false
rrtype := false
owner := true
brace := 0
x, err := s.tokenText()
defer close(c)
for err == nil {
l.column = s.position.Column
l.line = s.position.Line
if stri >= maxTok {
type zlexer struct {
br io.ByteReader
readErr error
line int
column int
comBuf string
comment string
l lex
cachedL *lex
brace int
quote bool
space bool
commt bool
rrtype bool
owner bool
nextL bool
eol bool // end-of-line
}
func newZLexer(r io.Reader) *zlexer {
br, ok := r.(io.ByteReader)
if !ok {
br = bufio.NewReaderSize(r, 1024)
}
return &zlexer{
br: br,
line: 1,
owner: true,
}
}
func (zl *zlexer) Err() error {
if zl.readErr == io.EOF {
return nil
}
return zl.readErr
}
// readByte returns the next byte from the input
func (zl *zlexer) readByte() (byte, bool) {
if zl.readErr != nil {
return 0, false
}
c, err := zl.br.ReadByte()
if err != nil {
zl.readErr = err
return 0, false
}
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
if zl.eol {
zl.line++
zl.column = 0
zl.eol = false
}
if c == '\n' {
zl.eol = true
} else {
zl.column++
}
return c, true
}
func (zl *zlexer) Peek() lex {
if zl.nextL {
return zl.l
}
l, ok := zl.Next()
if !ok {
return l
}
if zl.nextL {
// Cache l. Next returns zl.cachedL then zl.l.
zl.cachedL = &l
} else {
// In this case l == zl.l, so we just tell Next to return zl.l.
zl.nextL = true
}
return l
}
func (zl *zlexer) Next() (lex, bool) {
l := &zl.l
switch {
case zl.cachedL != nil:
l, zl.cachedL = zl.cachedL, nil
return *l, true
case zl.nextL:
zl.nextL = false
return *l, true
case l.err:
// Parsing errors should be sticky.
return lex{value: zEOF}, false
}
var (
str [maxTok]byte // Hold string text
com [maxTok]byte // Hold comment text
stri int // Offset in str (0 means empty)
comi int // Offset in com (0 means empty)
escape bool
)
if zl.comBuf != "" {
comi = copy(com[:], zl.comBuf)
zl.comBuf = ""
}
zl.comment = ""
for x, ok := zl.readByte(); ok; x, ok = zl.readByte() {
l.line, l.column = zl.line, zl.column
if stri >= len(str) {
l.token = "token length insufficient for parsing"
l.err = true
debug.Printf("[%+v]", l.token)
c <- l
return
return *l, true
}
if comi >= maxTok {
if comi >= len(com) {
l.token = "comment length insufficient for parsing"
l.err = true
debug.Printf("[%+v]", l.token)
c <- l
return
return *l, true
}
switch x {
case ' ', '\t':
if escape {
escape = false
str[stri] = x
stri++
break
}
if quote {
// Inside quotes this is legal
if escape || zl.quote {
// Inside quotes or escaped this is legal.
str[stri] = x
stri++
escape = false
break
}
if commt {
if zl.commt {
com[comi] = x
comi++
break
}
var retL lex
if stri == 0 {
// Space directly in the beginning, handled in the grammar
} else if owner {
} else if zl.owner {
// If we have a string and its the first, make it an owner
l.value = zOwner
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
// escaped $... names start with a \ not a $, so this check still works
switch l.tokenUpper {
switch strings.ToUpper(l.token) {
case "$TTL":
l.value = zDirTtl
l.value = zDirTTL
case "$ORIGIN":
l.value = zDirOrigin
case "$INCLUDE":
......@@ -553,255 +831,328 @@ func zlexer(s *scan, c chan lex) {
case "$GENERATE":
l.value = zDirGenerate
}
debug.Printf("[7 %+v]", l.token)
c <- l
retL = *l
} else {
l.value = zString
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
if !rrtype {
if t, ok := StringToType[l.tokenUpper]; ok {
if !zl.rrtype {
tokenUpper := strings.ToUpper(l.token)
if t, ok := StringToType[tokenUpper]; ok {
l.value = zRrtpe
l.torc = t
rrtype = true
} else {
if strings.HasPrefix(l.tokenUpper, "TYPE") {
t, ok := typeToInt(l.token)
if !ok {
l.token = "unknown RR type"
l.err = true
c <- l
return
}
l.value = zRrtpe
l.torc = t
zl.rrtype = true
} else if strings.HasPrefix(tokenUpper, "TYPE") {
t, ok := typeToInt(l.token)
if !ok {
l.token = "unknown RR type"
l.err = true
return *l, true
}
l.value = zRrtpe
l.torc = t
zl.rrtype = true
}
if t, ok := StringToClass[l.tokenUpper]; ok {
if t, ok := StringToClass[tokenUpper]; ok {
l.value = zClass
l.torc = t
} else {
if strings.HasPrefix(l.tokenUpper, "CLASS") {
t, ok := classToInt(l.token)
if !ok {
l.token = "unknown class"
l.err = true
c <- l
return
}
l.value = zClass
l.torc = t
} else if strings.HasPrefix(tokenUpper, "CLASS") {
t, ok := classToInt(l.token)
if !ok {
l.token = "unknown class"
l.err = true
return *l, true
}
l.value = zClass
l.torc = t
}
}
debug.Printf("[6 %+v]", l.token)
c <- l
retL = *l
}
stri = 0
// I reverse space stuff here
if !space && !commt {
zl.owner = false
if !zl.space {
zl.space = true
l.value = zBlank
l.token = " "
l.length = 1
debug.Printf("[5 %+v]", l.token)
c <- l
if retL == (lex{}) {
return *l, true
}
zl.nextL = true
}
if retL != (lex{}) {
return retL, true
}
owner = false
space = true
case ';':
if escape {
escape = false
if escape || zl.quote {
// Inside quotes or escaped this is legal.
str[stri] = x
stri++
escape = false
break
}
if quote {
// Inside quotes this is legal
str[stri] = x
stri++
break
zl.commt = true
zl.comBuf = ""
if comi > 1 {
// A newline was previously seen inside a comment that
// was inside braces and we delayed adding it until now.
com[comi] = ' ' // convert newline to space
comi++
if comi >= len(com) {
l.token = "comment length insufficient for parsing"
l.err = true
return *l, true
}
}
com[comi] = ';'
comi++
if stri > 0 {
zl.comBuf = string(com[:comi])
l.value = zString
l.token = string(str[:stri])
l.length = stri
debug.Printf("[4 %+v]", l.token)
c <- l
stri = 0
return *l, true
}
commt = true
com[comi] = ';'
comi++
case '\r':
escape = false
if quote {
if zl.quote {
str[stri] = x
stri++
break
}
// discard if outside of quotes
case '\n':
escape = false
// Escaped newline
if quote {
if zl.quote {
str[stri] = x
stri++
break
}
// inside quotes this is legal
if commt {
if zl.commt {
// Reset a comment
commt = false
rrtype = false
stri = 0
zl.commt = false
zl.rrtype = false
// If not in a brace this ends the comment AND the RR
if brace == 0 {
owner = true
owner = true
if zl.brace == 0 {
zl.owner = true
l.value = zNewline
l.token = "\n"
l.length = 1
l.comment = string(com[:comi])
debug.Printf("[3 %+v %+v]", l.token, l.comment)
c <- l
l.comment = ""
comi = 0
break
zl.comment = string(com[:comi])
return *l, true
}
com[comi] = ' ' // convert newline to space
comi++
zl.comBuf = string(com[:comi])
break
}
if brace == 0 {
if zl.brace == 0 {
// If there is previous text, we should output it here
var retL lex
if stri != 0 {
l.value = zString
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
if !rrtype {
if t, ok := StringToType[l.tokenUpper]; ok {
if !zl.rrtype {
tokenUpper := strings.ToUpper(l.token)
if t, ok := StringToType[tokenUpper]; ok {
zl.rrtype = true
l.value = zRrtpe
l.torc = t
rrtype = true
}
}
debug.Printf("[2 %+v]", l.token)
c <- l
retL = *l
}
l.value = zNewline
l.token = "\n"
l.length = 1
debug.Printf("[1 %+v]", l.token)
c <- l
stri = 0
commt = false
rrtype = false
owner = true
comi = 0
zl.comment = zl.comBuf
zl.comBuf = ""
zl.rrtype = false
zl.owner = true
if retL != (lex{}) {
zl.nextL = true
return retL, true
}
return *l, true
}
case '\\':
// comments do not get escaped chars, everything is copied
if commt {
if zl.commt {
com[comi] = x
comi++
break
}
// something already escaped must be in string
if escape {
str[stri] = x
stri++
escape = false
break
}
// something escaped outside of string gets added to string
str[stri] = x
stri++
escape = true
case '"':
if commt {
if zl.commt {
com[comi] = x
comi++
break
}
if escape {
str[stri] = x
stri++
escape = false
break
}
space = false
zl.space = false
// send previous gathered text and the quote
var retL lex
if stri != 0 {
l.value = zString
l.token = string(str[:stri])
l.length = stri
debug.Printf("[%+v]", l.token)
c <- l
stri = 0
retL = *l
}
// send quote itself as separate token
l.value = zQuote
l.token = "\""
l.length = 1
c <- l
quote = !quote
zl.quote = !zl.quote
if retL != (lex{}) {
zl.nextL = true
return retL, true
}
return *l, true
case '(', ')':
if commt {
if zl.commt {
com[comi] = x
comi++
break
}
if escape {
if escape || zl.quote {
// Inside quotes or escaped this is legal.
str[stri] = x
stri++
escape = false
break
}
if quote {
str[stri] = x
stri++
break
}
switch x {
case ')':
brace--
if brace < 0 {
zl.brace--
if zl.brace < 0 {
l.token = "extra closing brace"
l.err = true
debug.Printf("[%+v]", l.token)
c <- l
return
return *l, true
}
case '(':
brace++
zl.brace++
}
default:
escape = false
if commt {
if zl.commt {
com[comi] = x
comi++
break
}
str[stri] = x
stri++
space = false
zl.space = false
}
x, err = s.tokenText()
}
if zl.readErr != nil && zl.readErr != io.EOF {
// Don't return any tokens after a read error occurs.
return lex{value: zEOF}, false
}
var retL lex
if stri > 0 {
// Send remainder
l.token = string(str[:stri])
l.length = stri
// Send remainder of str
l.value = zString
debug.Printf("[%+v]", l.token)
c <- l
l.token = string(str[:stri])
retL = *l
if comi <= 0 {
return retL, true
}
}
if comi > 0 {
// Send remainder of com
l.value = zNewline
l.token = "\n"
zl.comment = string(com[:comi])
if retL != (lex{}) {
zl.nextL = true
return retL, true
}
return *l, true
}
if zl.brace != 0 {
l.token = "unbalanced brace"
l.err = true
return *l, true
}
return lex{value: zEOF}, false
}
func (zl *zlexer) Comment() string {
if zl.l.err {
return ""
}
return zl.comment
}
// Extract the class number from CLASSxx
......@@ -810,8 +1161,8 @@ func classToInt(token string) (uint16, bool) {
if len(token) < offset+1 {
return 0, false
}
class, ok := strconv.Atoi(token[offset:])
if ok != nil || class > maxUint16 {
class, err := strconv.ParseUint(token[offset:], 10, 16)
if err != nil {
return 0, false
}
return uint16(class), true
......@@ -823,17 +1174,16 @@ func typeToInt(token string) (uint16, bool) {
if len(token) < offset+1 {
return 0, false
}
typ, ok := strconv.Atoi(token[offset:])
if ok != nil || typ > maxUint16 {
typ, err := strconv.ParseUint(token[offset:], 10, 16)
if err != nil {
return 0, false
}
return uint16(typ), true
}
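// For example (illustrative): the RFC 3597 forms "CLASS255" and "TYPE65280"
// yield (255, true) and (65280, true), while "TYPE70000" fails because the
// value does not fit in a uint16.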
// Parse things like 2w, 2m, etc, Return the time in seconds.
func stringToTtl(token string) (uint32, bool) {
s := uint32(0)
i := uint32(0)
// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds.
func stringToTTL(token string) (uint32, bool) {
var s, i uint32
for _, c := range token {
switch c {
case 's', 'S':
......@@ -876,11 +1226,29 @@ func stringToCm(token string) (e, m uint8, ok bool) {
if cmeters, err = strconv.Atoi(s[1]); err != nil {
return
}
// There's no point in having more than 2 digits in this part, and it would
// only complicate the implementation ('123' would have to be treated as '12').
// So we simply reject it.
// We also make sure the first character is a digit to reject '+-' signs.
if len(s[1]) > 2 || s[1][0] < '0' || s[1][0] > '9' {
return
}
if len(s[1]) == 1 {
// 'nn.1' must be treated as 'nn meters and 10 cm', not 1 cm.
cmeters *= 10
}
if s[0] == "" {
// This will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm).
break
}
fallthrough
case 1:
if meters, err = strconv.Atoi(s[0]); err != nil {
return
}
// RFC1876 states the max value is 90000000.00. The latter two conditions enforce it.
if s[0][0] < '0' || s[0][0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) {
return
}
case 0:
// huh?
return 0, 0, false
......@@ -893,17 +1261,42 @@ func stringToCm(token string) (e, m uint8, ok bool) {
e = 0
val = cmeters
}
for val > 10 {
for val >= 10 {
e++
val /= 10
}
if e > 9 {
ok = false
}
m = uint8(val)
return
}
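// Worked example (illustrative): "2m" is 2e2 cm, giving mantissa 2 and
// exponent 2, which the LOC parser packs as m<<4|e = 0x22. The record's
// default Size of 0x12 encodes 1e2 cm (1m) in the same way.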
func toAbsoluteName(name, origin string) (absolute string, ok bool) {
// check for an explicit origin reference
if name == "@" {
// require a nonempty origin
if origin == "" {
return "", false
}
return origin, true
}
// require a valid domain name
_, ok = IsDomainName(name)
if !ok || name == "" {
return "", false
}
// check if name is already absolute
if IsFqdn(name) {
return name, true
}
// require a nonempty origin
if origin == "" {
return "", false
}
return appendOrigin(name, origin), true
}
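// For instance (illustrative):
//
//	toAbsoluteName("www", "example.org.")   // "www.example.org.", true
//	toAbsoluteName("@", "example.org.")     // "example.org.", true
//	toAbsoluteName("mail.example.org.", "") // already absolute, returned as-is
//	toAbsoluteName("www", "")               // "", false: relative name without an origin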
func appendOrigin(name, origin string) string {
if origin == "." {
return name + origin
......@@ -913,6 +1306,9 @@ func appendOrigin(name, origin string) string {
// LOC record helper function
func locCheckNorth(token string, latitude uint32) (uint32, bool) {
if latitude > 90*1000*60*60 {
return latitude, false
}
switch token {
case "n", "N":
return LOC_EQUATOR + latitude, true
......@@ -924,6 +1320,9 @@ func locCheckNorth(token string, latitude uint32) (uint32, bool) {
// LOC record helper function
func locCheckEast(token string, longitude uint32) (uint32, bool) {
if longitude > 180*1000*60*60 {
return longitude, false
}
switch token {
case "e", "E":
return LOC_EQUATOR + longitude, true
......@@ -933,24 +1332,21 @@ func locCheckEast(token string, longitude uint32) (uint32, bool) {
return longitude, false
}
// "Eat" the rest of the "line". Return potential comments
func slurpRemainder(c chan lex, f string) (*ParseError, string) {
l := <-c
com := ""
// "Eat" the rest of the "line"
func slurpRemainder(c *zlexer) *ParseError {
l, _ := c.Next()
switch l.value {
case zBlank:
l = <-c
com = l.comment
l, _ = c.Next()
if l.value != zNewline && l.value != zEOF {
return &ParseError{f, "garbage after rdata", l}, ""
return &ParseError{"", "garbage after rdata", l}
}
case zNewline:
com = l.comment
case zEOF:
default:
return &ParseError{f, "garbage after rdata", l}, ""
return &ParseError{"", "garbage after rdata", l}
}
return nil, com
return nil
}
// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64"
......@@ -959,13 +1355,13 @@ func stringToNodeID(l lex) (uint64, *ParseError) {
if len(l.token) < 19 {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
// There must be three colons at fixed positions; if not, it's a parse error
if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
u, e := strconv.ParseUint(s, 16, 64)
if e != nil {
u, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
return u, nil
......
package dns
import (
"bytes"
"encoding/base64"
"net"
"strconv"
"strings"
)
// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces)
// or an error
func endingToString(c *zlexer, errstr string) (string, *ParseError) {
var buffer bytes.Buffer
l, _ := c.Next() // zString
for l.value != zNewline && l.value != zEOF {
if l.err {
return buffer.String(), &ParseError{"", errstr, l}
}
switch l.value {
case zString:
buffer.WriteString(l.token)
case zBlank: // Ok
default:
return "", &ParseError{"", errstr, l}
}
l, _ = c.Next()
}
return buffer.String(), nil
}
// A remainder of the rdata with embedded spaces, split on unquoted whitespace
// and return the parsed string slice or an error
func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) {
// Get the remaining data until we see a zNewline
l, _ := c.Next()
if l.err {
return nil, &ParseError{"", errstr, l}
}
// Build the slice
s := make([]string, 0)
quote := false
empty := false
for l.value != zNewline && l.value != zEOF {
if l.err {
return nil, &ParseError{"", errstr, l}
}
switch l.value {
case zString:
empty = false
if len(l.token) > 255 {
// split up tokens that are larger than 255 into 255-chunks
sx := []string{}
p, i := 0, 255
for {
if i <= len(l.token) {
sx = append(sx, l.token[p:i])
} else {
sx = append(sx, l.token[p:])
break
}
p, i = p+255, i+255
}
s = append(s, sx...)
break
}
s = append(s, l.token)
case zBlank:
if quote {
// zBlank can only be seen in between txt parts.
return nil, &ParseError{"", errstr, l}
}
case zQuote:
if empty && quote {
s = append(s, "")
}
quote = !quote
empty = true
default:
return nil, &ParseError{"", errstr, l}
}
l, _ = c.Next()
}
if quote {
return nil, &ParseError{"", errstr, l}
}
return s, nil
}
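// For example (illustrative): a single unquoted token of 300 characters is
// split into two character-strings of 255 and 45 bytes, matching the TXT
// character-string limit, and a quoted empty string ("") is preserved as an
// empty element.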
func (rr *A) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
rr.A = net.ParseIP(l.token)
// IPv4 addresses cannot include ":".
// We do this rather than use net.IP's To4() because
// To4() treats IPv4-mapped IPv6 addresses as being
// IPv4.
isIPv4 := !strings.Contains(l.token, ":")
if rr.A == nil || !isIPv4 || l.err {
return &ParseError{"", "bad A A", l}
}
return slurpRemainder(c)
}
func (rr *AAAA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
rr.AAAA = net.ParseIP(l.token)
// IPv6 addresses must include ":", and IPv4
// addresses cannot include ":".
isIPv6 := strings.Contains(l.token, ":")
if rr.AAAA == nil || !isIPv6 || l.err {
return &ParseError{"", "bad AAAA AAAA", l}
}
return slurpRemainder(c)
}
func (rr *NS) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad NS Ns", l}
}
rr.Ns = name
return slurpRemainder(c)
}
func (rr *PTR) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad PTR Ptr", l}
}
rr.Ptr = name
return slurpRemainder(c)
}
func (rr *NSAPPTR) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad NSAP-PTR Ptr", l}
}
rr.Ptr = name
return slurpRemainder(c)
}
func (rr *RP) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
mbox, mboxOk := toAbsoluteName(l.token, o)
if l.err || !mboxOk {
return &ParseError{"", "bad RP Mbox", l}
}
rr.Mbox = mbox
c.Next() // zBlank
l, _ = c.Next()
rr.Txt = l.token
txt, txtOk := toAbsoluteName(l.token, o)
if l.err || !txtOk {
return &ParseError{"", "bad RP Txt", l}
}
rr.Txt = txt
return slurpRemainder(c)
}
func (rr *MR) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MR Mr", l}
}
rr.Mr = name
return slurpRemainder(c)
}
func (rr *MB) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MB Mb", l}
}
rr.Mb = name
return slurpRemainder(c)
}
func (rr *MG) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MG Mg", l}
}
rr.Mg = name
return slurpRemainder(c)
}
func (rr *HINFO) parse(c *zlexer, o string) *ParseError {
chunks, e := endingToTxtSlice(c, "bad HINFO Fields")
if e != nil {
return e
}
if ln := len(chunks); ln == 0 {
return nil
} else if ln == 1 {
// Can we split it?
if out := strings.Fields(chunks[0]); len(out) > 1 {
chunks = out
} else {
chunks = append(chunks, "")
}
}
rr.Cpu = chunks[0]
rr.Os = strings.Join(chunks[1:], " ")
return nil
}
func (rr *MINFO) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
rmail, rmailOk := toAbsoluteName(l.token, o)
if l.err || !rmailOk {
return &ParseError{"", "bad MINFO Rmail", l}
}
rr.Rmail = rmail
c.Next() // zBlank
l, _ = c.Next()
rr.Email = l.token
email, emailOk := toAbsoluteName(l.token, o)
if l.err || !emailOk {
return &ParseError{"", "bad MINFO Email", l}
}
rr.Email = email
return slurpRemainder(c)
}
func (rr *MF) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MF Mf", l}
}
rr.Mf = name
return slurpRemainder(c)
}
func (rr *MD) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MD Md", l}
}
rr.Md = name
return slurpRemainder(c)
}
func (rr *MX) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad MX Pref", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Mx = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MX Mx", l}
}
rr.Mx = name
return slurpRemainder(c)
}
func (rr *RT) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil {
return &ParseError{"", "bad RT Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Host = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad RT Host", l}
}
rr.Host = name
return slurpRemainder(c)
}
func (rr *AFSDB) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad AFSDB Subtype", l}
}
rr.Subtype = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Hostname = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad AFSDB Hostname", l}
}
rr.Hostname = name
return slurpRemainder(c)
}
func (rr *X25) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if l.err {
return &ParseError{"", "bad X25 PSDNAddress", l}
}
rr.PSDNAddress = l.token
return slurpRemainder(c)
}
func (rr *KX) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad KX Pref", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Exchanger = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad KX Exchanger", l}
}
rr.Exchanger = name
return slurpRemainder(c)
}
func (rr *CNAME) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad CNAME Target", l}
}
rr.Target = name
return slurpRemainder(c)
}
func (rr *DNAME) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad DNAME Target", l}
}
rr.Target = name
return slurpRemainder(c)
}
func (rr *SOA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
ns, nsOk := toAbsoluteName(l.token, o)
if l.err || !nsOk {
return &ParseError{"", "bad SOA Ns", l}
}
rr.Ns = ns
c.Next() // zBlank
l, _ = c.Next()
rr.Mbox = l.token
mbox, mboxOk := toAbsoluteName(l.token, o)
if l.err || !mboxOk {
return &ParseError{"", "bad SOA Mbox", l}
}
rr.Mbox = mbox
c.Next() // zBlank
var (
v uint32
ok bool
)
for i := 0; i < 5; i++ {
l, _ = c.Next()
if l.err {
return &ParseError{"", "bad SOA zone parameter", l}
}
if j, err := strconv.ParseUint(l.token, 10, 32); err != nil {
if i == 0 {
// Serial must be a number
return &ParseError{"", "bad SOA zone parameter", l}
}
// We allow other fields to be unitful duration strings
if v, ok = stringToTTL(l.token); !ok {
return &ParseError{"", "bad SOA zone parameter", l}
}
} else {
v = uint32(j)
}
switch i {
case 0:
rr.Serial = v
c.Next() // zBlank
case 1:
rr.Refresh = v
c.Next() // zBlank
case 2:
rr.Retry = v
c.Next() // zBlank
case 3:
rr.Expire = v
c.Next() // zBlank
case 4:
rr.Minttl = v
}
}
return slurpRemainder(c)
}
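// For example (illustrative), both of these lines parse to the same SOA
// values, since every field after the serial may also use unitful TTL syntax:
//
//	example.org. IN SOA ns1.example.org. hostmaster.example.org. 2024010101 7200 3600 1209600 300
//	example.org. IN SOA ns1.example.org. hostmaster.example.org. 2024010101 2h 1h 2w 5m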
func (rr *SRV) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad SRV Priority", l}
}
rr.Priority = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e1 := strconv.ParseUint(l.token, 10, 16)
if e1 != nil || l.err {
return &ParseError{"", "bad SRV Weight", l}
}
rr.Weight = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e2 := strconv.ParseUint(l.token, 10, 16)
if e2 != nil || l.err {
return &ParseError{"", "bad SRV Port", l}
}
rr.Port = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Target = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad SRV Target", l}
}
rr.Target = name
return slurpRemainder(c)
}
func (rr *NAPTR) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad NAPTR Order", l}
}
rr.Order = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e1 := strconv.ParseUint(l.token, 10, 16)
if e1 != nil || l.err {
return &ParseError{"", "bad NAPTR Preference", l}
}
rr.Preference = uint16(i)
// Flags
c.Next() // zBlank
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Flags", l}
}
l, _ = c.Next() // Either String or Quote
if l.value == zString {
rr.Flags = l.token
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Flags", l}
}
} else if l.value == zQuote {
rr.Flags = ""
} else {
return &ParseError{"", "bad NAPTR Flags", l}
}
// Service
c.Next() // zBlank
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Service", l}
}
l, _ = c.Next() // Either String or Quote
if l.value == zString {
rr.Service = l.token
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Service", l}
}
} else if l.value == zQuote {
rr.Service = ""
} else {
return &ParseError{"", "bad NAPTR Service", l}
}
// Regexp
c.Next() // zBlank
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Regexp", l}
}
l, _ = c.Next() // Either String or Quote
if l.value == zString {
rr.Regexp = l.token
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Regexp", l}
}
} else if l.value == zQuote {
rr.Regexp = ""
} else {
return &ParseError{"", "bad NAPTR Regexp", l}
}
// After quote no space??
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Replacement = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad NAPTR Replacement", l}
}
rr.Replacement = name
return slurpRemainder(c)
}
func (rr *TALINK) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
previousName, previousNameOk := toAbsoluteName(l.token, o)
if l.err || !previousNameOk {
return &ParseError{"", "bad TALINK PreviousName", l}
}
rr.PreviousName = previousName
c.Next() // zBlank
l, _ = c.Next()
rr.NextName = l.token
nextName, nextNameOk := toAbsoluteName(l.token, o)
if l.err || !nextNameOk {
return &ParseError{"", "bad TALINK NextName", l}
}
rr.NextName = nextName
return slurpRemainder(c)
}
func (rr *LOC) parse(c *zlexer, o string) *ParseError {
// Non zero defaults for LOC record, see RFC 1876, Section 3.
rr.Size = 0x12 // 1e2 cm (1m)
rr.HorizPre = 0x16 // 1e6 cm (10000m)
rr.VertPre = 0x13 // 1e3 cm (10m)
ok := false
// North
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err || i > 90 {
return &ParseError{"", "bad LOC Latitude", l}
}
rr.Latitude = 1000 * 60 * 60 * uint32(i)
c.Next() // zBlank
// Either number, 'N' or 'S'
l, _ = c.Next()
if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
goto East
}
if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 {
return &ParseError{"", "bad LOC Latitude minutes", l}
} else {
rr.Latitude += 1000 * 60 * uint32(i)
}
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 {
return &ParseError{"", "bad LOC Latitude seconds", l}
} else {
rr.Latitude += uint32(1000 * i)
}
c.Next() // zBlank
// Either number, 'N' or 'S'
l, _ = c.Next()
if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
goto East
}
// If still alive, flag an error
return &ParseError{"", "bad LOC Latitude North/South", l}
East:
// East
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 180 {
return &ParseError{"", "bad LOC Longitude", l}
} else {
rr.Longitude = 1000 * 60 * 60 * uint32(i)
}
c.Next() // zBlank
// Either number, 'E' or 'W'
l, _ = c.Next()
if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
goto Altitude
}
if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 {
return &ParseError{"", "bad LOC Longitude minutes", l}
} else {
rr.Longitude += 1000 * 60 * uint32(i)
}
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 {
return &ParseError{"", "bad LOC Longitude seconds", l}
} else {
rr.Longitude += uint32(1000 * i)
}
c.Next() // zBlank
// Either number, 'E' or 'W'
l, _ = c.Next()
if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
goto Altitude
}
// If still alive, flag an error
return &ParseError{"", "bad LOC Longitude East/West", l}
Altitude:
c.Next() // zBlank
l, _ = c.Next()
if l.token == "" || l.err {
return &ParseError{"", "bad LOC Altitude", l}
}
if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
l.token = l.token[0 : len(l.token)-1]
}
if i, err := strconv.ParseFloat(l.token, 64); err != nil {
return &ParseError{"", "bad LOC Altitude", l}
} else {
rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5)
}
// And now optionally the other values
l, _ = c.Next()
count := 0
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zString:
switch count {
case 0: // Size
exp, m, ok := stringToCm(l.token)
if !ok {
return &ParseError{"", "bad LOC Size", l}
}
rr.Size = exp&0x0f | m<<4&0xf0
case 1: // HorizPre
exp, m, ok := stringToCm(l.token)
if !ok {
return &ParseError{"", "bad LOC HorizPre", l}
}
rr.HorizPre = exp&0x0f | m<<4&0xf0
case 2: // VertPre
exp, m, ok := stringToCm(l.token)
if !ok {
return &ParseError{"", "bad LOC VertPre", l}
}
rr.VertPre = exp&0x0f | m<<4&0xf0
}
count++
case zBlank:
// Ok
default:
return &ParseError{"", "bad LOC Size, HorizPre or VertPre", l}
}
l, _ = c.Next()
}
return nil
}
func (rr *HIP) parse(c *zlexer, o string) *ParseError {
// HitLength is not represented
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad HIP PublicKeyAlgorithm", l}
}
rr.PublicKeyAlgorithm = uint8(i)
c.Next() // zBlank
l, _ = c.Next() // zString
if l.token == "" || l.err {
return &ParseError{"", "bad HIP Hit", l}
}
rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6.
rr.HitLength = uint8(len(rr.Hit)) / 2
c.Next() // zBlank
l, _ = c.Next() // zString
if l.token == "" || l.err {
return &ParseError{"", "bad HIP PublicKey", l}
}
rr.PublicKey = l.token // This cannot contain spaces
decodedPK, decodedPKerr := base64.StdEncoding.DecodeString(rr.PublicKey)
if decodedPKerr != nil {
return &ParseError{"", "bad HIP PublicKey", l}
}
rr.PublicKeyLength = uint16(len(decodedPK))
// RendezvousServers (if any)
l, _ = c.Next()
var xs []string
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zString:
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad HIP RendezvousServers", l}
}
xs = append(xs, name)
case zBlank:
// Ok
default:
return &ParseError{"", "bad HIP RendezvousServers", l}
}
l, _ = c.Next()
}
rr.RendezvousServers = xs
return nil
}
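// exampleParseHIP is an illustrative sketch, not part of the upstream library:
// the HIP parser above expects <public key algorithm> <hex HIT> <base64 public
// key>, optionally followed by rendezvous server names. All values below are
// placeholders rather than real HIP material.
func exampleParseHIP() (RR, error) {
	return NewRR("www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAQ== rvs.example.com.")
}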
func (rr *CERT) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if v, ok := StringToCertType[l.token]; ok {
rr.Type = v
} else if i, err := strconv.ParseUint(l.token, 10, 16); err != nil {
return &ParseError{"", "bad CERT Type", l}
} else {
rr.Type = uint16(i)
}
c.Next() // zBlank
l, _ = c.Next() // zString
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad CERT KeyTag", l}
}
rr.KeyTag = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
if v, ok := StringToAlgorithm[l.token]; ok {
rr.Algorithm = v
} else if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
return &ParseError{"", "bad CERT Algorithm", l}
} else {
rr.Algorithm = uint8(i)
}
s, e1 := endingToString(c, "bad CERT Certificate")
if e1 != nil {
return e1
}
rr.Certificate = s
return nil
}
func (rr *OPENPGPKEY) parse(c *zlexer, o string) *ParseError {
s, e := endingToString(c, "bad OPENPGPKEY PublicKey")
if e != nil {
return e
}
rr.PublicKey = s
return nil
}
func (rr *CSYNC) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
j, e := strconv.ParseUint(l.token, 10, 32)
if e != nil {
// Serial must be a number
return &ParseError{"", "bad CSYNC serial", l}
}
rr.Serial = uint32(j)
c.Next() // zBlank
l, _ = c.Next()
j, e1 := strconv.ParseUint(l.token, 10, 16)
if e1 != nil {
// Flags must be a number
return &ParseError{"", "bad CSYNC flags", l}
}
rr.Flags = uint16(j)
rr.TypeBitMap = make([]uint16, 0)
var (
k uint16
ok bool
)
l, _ = c.Next()
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zBlank:
// Ok
case zString:
tokenUpper := strings.ToUpper(l.token)
if k, ok = StringToType[tokenUpper]; !ok {
if k, ok = typeToInt(l.token); !ok {
return &ParseError{"", "bad CSYNC TypeBitMap", l}
}
}
rr.TypeBitMap = append(rr.TypeBitMap, k)
default:
return &ParseError{"", "bad CSYNC TypeBitMap", l}
}
l, _ = c.Next()
}
return nil
}
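// exampleParseCSYNC is an illustrative sketch, not part of the upstream
// library: the CSYNC parser above reads the SOA serial, the flags word and a
// type bitmap, in the RFC 7477 presentation style. The values are arbitrary.
func exampleParseCSYNC() (RR, error) {
	return NewRR("alpha.example.com. 3600 IN CSYNC 66 3 A NS AAAA")
}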
func (rr *ZONEMD) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return &ParseError{"", "bad ZONEMD Serial", l}
}
rr.Serial = uint32(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad ZONEMD Scheme", l}
}
rr.Scheme = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, err := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err {
return &ParseError{"", "bad ZONEMD Hash Algorithm", l}
}
rr.Hash = uint8(i)
s, e2 := endingToString(c, "bad ZONEMD Digest")
if e2 != nil {
return e2
}
rr.Digest = s
return nil
}
func (rr *SIG) parse(c *zlexer, o string) *ParseError { return rr.RRSIG.parse(c, o) }
func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
tokenUpper := strings.ToUpper(l.token)
if t, ok := StringToType[tokenUpper]; !ok {
if strings.HasPrefix(tokenUpper, "TYPE") {
t, ok = typeToInt(l.token)
if !ok {
return &ParseError{"", "bad RRSIG Typecovered", l}
}
rr.TypeCovered = t
} else {
return &ParseError{"", "bad RRSIG Typecovered", l}
}
} else {
rr.TypeCovered = t
}
c.Next() // zBlank
l, _ = c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad RRSIG Algorithm", l}
}
rr.Algorithm = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad RRSIG Labels", l}
}
rr.Labels = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 32)
if e2 != nil || l.err {
return &ParseError{"", "bad RRSIG OrigTtl", l}
}
rr.OrigTtl = uint32(i)
c.Next() // zBlank
l, _ = c.Next()
if i, err := StringToTime(l.token); err != nil {
// Try to see if all numeric and use it as epoch
if i, err := strconv.ParseUint(l.token, 10, 32); err == nil {
rr.Expiration = uint32(i)
} else {
return &ParseError{"", "bad RRSIG Expiration", l}
}
} else {
rr.Expiration = i
}
c.Next() // zBlank
l, _ = c.Next()
if i, err := StringToTime(l.token); err != nil {
if i, err := strconv.ParseUint(l.token, 10, 32); err == nil {
rr.Inception = uint32(i)
} else {
return &ParseError{"", "bad RRSIG Inception", l}
}
} else {
rr.Inception = i
}
c.Next() // zBlank
l, _ = c.Next()
i, e3 := strconv.ParseUint(l.token, 10, 16)
if e3 != nil || l.err {
return &ParseError{"", "bad RRSIG KeyTag", l}
}
rr.KeyTag = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
rr.SignerName = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad RRSIG SignerName", l}
}
rr.SignerName = name
s, e4 := endingToString(c, "bad RRSIG Signature")
if e4 != nil {
return e4
}
rr.Signature = s
return nil
}
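// exampleParseRRSIG is an illustrative sketch, not part of the upstream
// library: the RRSIG parser above accepts expiration/inception either as
// YYYYMMDDHHmmSS timestamps or as raw epoch seconds, and gathers the trailing
// base64 as the signature. The signature below is a meaningless placeholder.
func exampleParseRRSIG() (RR, error) {
	return NewRR("example.org. 3600 IN RRSIG SOA 8 2 3600 20240101000000 20231201000000 12345 example.org. aGVsbG8gd29ybGQ=")
}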
func (rr *NSEC) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad NSEC NextDomain", l}
}
rr.NextDomain = name
rr.TypeBitMap = make([]uint16, 0)
var (
k uint16
ok bool
)
l, _ = c.Next()
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zBlank:
// Ok
case zString:
tokenUpper := strings.ToUpper(l.token)
if k, ok = StringToType[tokenUpper]; !ok {
if k, ok = typeToInt(l.token); !ok {
return &ParseError{"", "bad NSEC TypeBitMap", l}
}
}
rr.TypeBitMap = append(rr.TypeBitMap, k)
default:
return &ParseError{"", "bad NSEC TypeBitMap", l}
}
l, _ = c.Next()
}
return nil
}
func (rr *NSEC3) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad NSEC3 Hash", l}
}
rr.Hash = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad NSEC3 Flags", l}
}
rr.Flags = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 16)
if e2 != nil || l.err {
return &ParseError{"", "bad NSEC3 Iterations", l}
}
rr.Iterations = uint16(i)
c.Next()
l, _ = c.Next()
if l.token == "" || l.err {
return &ParseError{"", "bad NSEC3 Salt", l}
}
if l.token != "-" {
rr.SaltLength = uint8(len(l.token)) / 2
rr.Salt = l.token
}
c.Next()
l, _ = c.Next()
if l.token == "" || l.err {
return &ParseError{"", "bad NSEC3 NextDomain", l}
}
rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits)
rr.NextDomain = l.token
rr.TypeBitMap = make([]uint16, 0)
var (
k uint16
ok bool
)
l, _ = c.Next()
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zBlank:
// Ok
case zString:
tokenUpper := strings.ToUpper(l.token)
if k, ok = StringToType[tokenUpper]; !ok {
if k, ok = typeToInt(l.token); !ok {
return &ParseError{"", "bad NSEC3 TypeBitMap", l}
}
}
rr.TypeBitMap = append(rr.TypeBitMap, k)
default:
return &ParseError{"", "bad NSEC3 TypeBitMap", l}
}
l, _ = c.Next()
}
return nil
}
func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad NSEC3PARAM Hash", l}
}
rr.Hash = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad NSEC3PARAM Flags", l}
}
rr.Flags = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 16)
if e2 != nil || l.err {
return &ParseError{"", "bad NSEC3PARAM Iterations", l}
}
rr.Iterations = uint16(i)
c.Next()
l, _ = c.Next()
if l.token != "-" {
rr.SaltLength = uint8(len(l.token) / 2)
rr.Salt = l.token
}
return slurpRemainder(c)
}
func (rr *EUI48) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if len(l.token) != 17 || l.err {
return &ParseError{"", "bad EUI48 Address", l}
}
addr := make([]byte, 12)
dash := 0
for i := 0; i < 10; i += 2 {
addr[i] = l.token[i+dash]
addr[i+1] = l.token[i+1+dash]
dash++
if l.token[i+1+dash] != '-' {
return &ParseError{"", "bad EUI48 Address", l}
}
}
addr[10] = l.token[15]
addr[11] = l.token[16]
i, e := strconv.ParseUint(string(addr), 16, 48)
if e != nil {
return &ParseError{"", "bad EUI48 Address", l}
}
rr.Address = i
return slurpRemainder(c)
}
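// exampleParseEUI48 is an illustrative sketch, not part of the upstream
// library: the parser above requires exactly six dash-separated hex octets
// (17 characters in total). The address below is an arbitrary placeholder.
func exampleParseEUI48() (RR, error) {
	return NewRR("host.example. 3600 IN EUI48 00-00-5e-00-53-2a")
}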
func (rr *EUI64) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if len(l.token) != 23 || l.err {
return &ParseError{"", "bad EUI64 Address", l}
}
addr := make([]byte, 16)
dash := 0
for i := 0; i < 14; i += 2 {
addr[i] = l.token[i+dash]
addr[i+1] = l.token[i+1+dash]
dash++
if l.token[i+1+dash] != '-' {
return &ParseError{"", "bad EUI64 Address", l}
}
}
addr[14] = l.token[21]
addr[15] = l.token[22]
i, e := strconv.ParseUint(string(addr), 16, 64)
if e != nil {
return &ParseError{"", "bad EUI68 Address", l}
}
rr.Address = i
return slurpRemainder(c)
}
func (rr *SSHFP) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad SSHFP Algorithm", l}
}
rr.Algorithm = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad SSHFP Type", l}
}
rr.Type = uint8(i)
c.Next() // zBlank
s, e2 := endingToString(c, "bad SSHFP Fingerprint")
if e2 != nil {
return e2
}
rr.FingerPrint = s
return nil
}
func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad " + typ + " Flags", l}
}
rr.Flags = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad " + typ + " Protocol", l}
}
rr.Protocol = uint8(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e2 := strconv.ParseUint(l.token, 10, 8)
if e2 != nil || l.err {
return &ParseError{"", "bad " + typ + " Algorithm", l}
}
rr.Algorithm = uint8(i)
s, e3 := endingToString(c, "bad "+typ+" PublicKey")
if e3 != nil {
return e3
}
rr.PublicKey = s
return nil
}
func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "DNSKEY") }
func (rr *KEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "KEY") }
func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "CDNSKEY") }
func (rr *DS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DS") }
func (rr *DLV) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DLV") }
func (rr *CDS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "CDS") }
func (rr *RKEY) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad RKEY Flags", l}
}
rr.Flags = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad RKEY Protocol", l}
}
rr.Protocol = uint8(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e2 := strconv.ParseUint(l.token, 10, 8)
if e2 != nil || l.err {
return &ParseError{"", "bad RKEY Algorithm", l}
}
rr.Algorithm = uint8(i)
s, e3 := endingToString(c, "bad RKEY PublicKey")
if e3 != nil {
return e3
}
rr.PublicKey = s
return nil
}
func (rr *EID) parse(c *zlexer, o string) *ParseError {
s, e := endingToString(c, "bad EID Endpoint")
if e != nil {
return e
}
rr.Endpoint = s
return nil
}
func (rr *NIMLOC) parse(c *zlexer, o string) *ParseError {
s, e := endingToString(c, "bad NIMLOC Locator")
if e != nil {
return e
}
rr.Locator = s
return nil
}
func (rr *GPOS) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
_, e := strconv.ParseFloat(l.token, 64)
if e != nil || l.err {
return &ParseError{"", "bad GPOS Longitude", l}
}
rr.Longitude = l.token
c.Next() // zBlank
l, _ = c.Next()
_, e1 := strconv.ParseFloat(l.token, 64)
if e1 != nil || l.err {
return &ParseError{"", "bad GPOS Latitude", l}
}
rr.Latitude = l.token
c.Next() // zBlank
l, _ = c.Next()
_, e2 := strconv.ParseFloat(l.token, 64)
if e2 != nil || l.err {
return &ParseError{"", "bad GPOS Altitude", l}
}
rr.Altitude = l.token
return slurpRemainder(c)
}
func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad " + typ + " KeyTag", l}
}
rr.KeyTag = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
tokenUpper := strings.ToUpper(l.token)
i, ok := StringToAlgorithm[tokenUpper]
if !ok || l.err {
return &ParseError{"", "bad " + typ + " Algorithm", l}
}
rr.Algorithm = i
} else {
rr.Algorithm = uint8(i)
}
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad " + typ + " DigestType", l}
}
rr.DigestType = uint8(i)
s, e2 := endingToString(c, "bad "+typ+" Digest")
if e2 != nil {
return e2
}
rr.Digest = s
return nil
}
func (rr *TA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad TA KeyTag", l}
}
rr.KeyTag = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
tokenUpper := strings.ToUpper(l.token)
i, ok := StringToAlgorithm[tokenUpper]
if !ok || l.err {
return &ParseError{"", "bad TA Algorithm", l}
}
rr.Algorithm = i
} else {
rr.Algorithm = uint8(i)
}
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad TA DigestType", l}
}
rr.DigestType = uint8(i)
s, e2 := endingToString(c, "bad TA Digest")
if e2 != nil {
return e2
}
rr.Digest = s
return nil
}
func (rr *TLSA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad TLSA Usage", l}
}
rr.Usage = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad TLSA Selector", l}
}
rr.Selector = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 8)
if e2 != nil || l.err {
return &ParseError{"", "bad TLSA MatchingType", l}
}
rr.MatchingType = uint8(i)
// The certificate association data may span multiple tokens; gather the remainder.
s, e3 := endingToString(c, "bad TLSA Certificate")
if e3 != nil {
return e3
}
rr.Certificate = s
return nil
}
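// exampleParseTLSA is an illustrative sketch, not part of the upstream
// library: usage, selector and matching type are small integers, and the rest
// of the rdata is hex-encoded certificate association data. The digest below
// is a placeholder, not a real certificate hash.
func exampleParseTLSA() (RR, error) {
	return NewRR("_443._tcp.www.example.com. 3600 IN TLSA 3 1 1 6bb1a2c3d4e5f60718293a4b5c6d7e8f6bb1a2c3d4e5f60718293a4b5c6d7e8f")
}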
func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad SMIMEA Usage", l}
}
rr.Usage = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad SMIMEA Selector", l}
}
rr.Selector = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 8)
if e2 != nil || l.err {
return &ParseError{"", "bad SMIMEA MatchingType", l}
}
rr.MatchingType = uint8(i)
// The certificate association data may span multiple tokens; gather the remainder.
s, e3 := endingToString(c, "bad SMIMEA Certificate")
if e3 != nil {
return e3
}
rr.Certificate = s
return nil
}
func (rr *RFC3597) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if l.token != "\\#" {
return &ParseError{"", "bad RFC3597 Rdata", l}
}
c.Next() // zBlank
l, _ = c.Next()
rdlength, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad RFC3597 Rdata ", l}
}
s, e1 := endingToString(c, "bad RFC3597 Rdata")
if e1 != nil {
return e1
}
if int(rdlength)*2 != len(s) {
return &ParseError{"", "bad RFC3597 Rdata", l}
}
rr.Rdata = s
return nil
}
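// exampleParseRFC3597 is an illustrative sketch, not part of the upstream
// library: RFC 3597 rdata is written as \# <length> <hex>, and the parser
// above checks that the hex string is exactly twice the declared length.
// TYPE731 and the payload are arbitrary placeholders.
func exampleParseRFC3597() (RR, error) {
	return NewRR(`a.example. 3600 IN TYPE731 \# 6 abcdef012345`)
}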
func (rr *SPF) parse(c *zlexer, o string) *ParseError {
s, e := endingToTxtSlice(c, "bad SPF Txt")
if e != nil {
return e
}
rr.Txt = s
return nil
}
func (rr *AVC) parse(c *zlexer, o string) *ParseError {
s, e := endingToTxtSlice(c, "bad AVC Txt")
if e != nil {
return e
}
rr.Txt = s
return nil
}
func (rr *TXT) parse(c *zlexer, o string) *ParseError {
// no zBlank reading here, because all this rdata is TXT
s, e := endingToTxtSlice(c, "bad TXT Txt")
if e != nil {
return e
}
rr.Txt = s
return nil
}
// identical to the TXT parser above
func (rr *NINFO) parse(c *zlexer, o string) *ParseError {
s, e := endingToTxtSlice(c, "bad NINFO ZSData")
if e != nil {
return e
}
rr.ZSData = s
return nil
}
func (rr *URI) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad URI Priority", l}
}
rr.Priority = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 16)
if e1 != nil || l.err {
return &ParseError{"", "bad URI Weight", l}
}
rr.Weight = uint16(i)
c.Next() // zBlank
s, e2 := endingToTxtSlice(c, "bad URI Target")
if e2 != nil {
return e2
}
if len(s) != 1 {
return &ParseError{"", "bad URI Target", l}
}
rr.Target = s[0]
return nil
}
func (rr *DHCID) parse(c *zlexer, o string) *ParseError {
// awesome record to parse!
s, e := endingToString(c, "bad DHCID Digest")
if e != nil {
return e
}
rr.Digest = s
return nil
}
func (rr *NID) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad NID Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
u, e1 := stringToNodeID(l)
if e1 != nil || l.err {
return e1
}
rr.NodeID = u
return slurpRemainder(c)
}
func (rr *L32) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad L32 Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Locator32 = net.ParseIP(l.token)
if rr.Locator32 == nil || l.err {
return &ParseError{"", "bad L32 Locator", l}
}
return slurpRemainder(c)
}
func (rr *LP) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad LP Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Fqdn = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad LP Fqdn", l}
}
rr.Fqdn = name
return slurpRemainder(c)
}
func (rr *L64) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad L64 Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
u, e1 := stringToNodeID(l)
if e1 != nil || l.err {
return e1
}
rr.Locator64 = u
return slurpRemainder(c)
}
func (rr *UID) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return &ParseError{"", "bad UID Uid", l}
}
rr.Uid = uint32(i)
return slurpRemainder(c)
}
func (rr *GID) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return &ParseError{"", "bad GID Gid", l}
}
rr.Gid = uint32(i)
return slurpRemainder(c)
}
func (rr *UINFO) parse(c *zlexer, o string) *ParseError {
s, e := endingToTxtSlice(c, "bad UINFO Uinfo")
if e != nil {
return e
}
if ln := len(s); ln == 0 {
return nil
}
rr.Uinfo = s[0] // silently discard anything after the first character-string
return nil
}
func (rr *PX) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad PX Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Map822 = l.token
map822, map822Ok := toAbsoluteName(l.token, o)
if l.err || !map822Ok {
return &ParseError{"", "bad PX Map822", l}
}
rr.Map822 = map822
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Mapx400 = l.token
mapx400, mapx400Ok := toAbsoluteName(l.token, o)
if l.err || !mapx400Ok {
return &ParseError{"", "bad PX Mapx400", l}
}
rr.Mapx400 = mapx400
return slurpRemainder(c)
}
func (rr *CAA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad CAA Flag", l}
}
rr.Flag = uint8(i)
c.Next() // zBlank
l, _ = c.Next() // zString
if l.value != zString {
return &ParseError{"", "bad CAA Tag", l}
}
rr.Tag = l.token
c.Next() // zBlank
s, e1 := endingToTxtSlice(c, "bad CAA Value")
if e1 != nil {
return e1
}
if len(s) != 1 {
return &ParseError{"", "bad CAA Value", l}
}
rr.Value = s[0]
return nil
}
func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
// Algorithm
if l.value != zString {
return &ParseError{"", "bad TKEY algorithm", l}
}
rr.Algorithm = l.token
c.Next() // zBlank
// Get the key length and key values
l, _ = c.Next()
i, e := strconv.ParseUint(l.token, 10, 16) // KeySize is a 16-bit field
if e != nil || l.err {
return &ParseError{"", "bad TKEY key length", l}
}
rr.KeySize = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
if l.value != zString {
return &ParseError{"", "bad TKEY key", l}
}
rr.Key = l.token
c.Next() // zBlank
// Get the otherdata length and string data
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 16) // OtherLen is a 16-bit field
if e1 != nil || l.err {
return &ParseError{"", "bad TKEY otherdata length", l}
}
rr.OtherLen = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
if l.value != zString {
return &ParseError{"", "bad TKEY otherday", l}
}
rr.OtherData = l.token
return nil
}
func (rr *APL) parse(c *zlexer, o string) *ParseError {
var prefixes []APLPrefix
for {
l, _ := c.Next()
if l.value == zNewline || l.value == zEOF {
break
}
if l.value == zBlank && prefixes != nil {
continue
}
if l.value != zString {
return &ParseError{"", "unexpected APL field", l}
}
// Expected format: [!]afi:address/prefix
colon := strings.IndexByte(l.token, ':')
if colon == -1 {
return &ParseError{"", "missing colon in APL field", l}
}
family, cidr := l.token[:colon], l.token[colon+1:]
var negation bool
if family != "" && family[0] == '!' {
negation = true
family = family[1:]
}
afi, e := strconv.ParseUint(family, 10, 16)
if e != nil {
return &ParseError{"", "failed to parse APL family: " + e.Error(), l}
}
var addrLen int
switch afi {
case 1:
addrLen = net.IPv4len
case 2:
addrLen = net.IPv6len
default:
return &ParseError{"", "unrecognized APL family", l}
}
ip, subnet, e1 := net.ParseCIDR(cidr)
if e1 != nil {
return &ParseError{"", "failed to parse APL address: " + e1.Error(), l}
}
if !ip.Equal(subnet.IP) {
return &ParseError{"", "extra bits in APL address", l}
}
if len(subnet.IP) != addrLen {
return &ParseError{"", "address mismatch with the APL family", l}
}
prefixes = append(prefixes, APLPrefix{
Negation: negation,
Network: *subnet,
})
}
rr.Prefixes = prefixes
return nil
}
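// exampleParseAPL is an illustrative sketch, not part of the upstream library:
// each APL item is [!]afi:address/prefix, the address must be the base of the
// prefix, and family 1 is IPv4 while family 2 is IPv6. The prefixes below are
// arbitrary, in the RFC 3123 example style.
func exampleParseAPL() (RR, error) {
	return NewRR("foo.example. 3600 IN APL 1:192.168.32.0/21 !1:192.168.38.0/28")
}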
package dns
// Implements a simple scanner that returns a byte stream from an io.Reader.
import (
"bufio"
"io"
"text/scanner"
)
type scan struct {
src *bufio.Reader
position scanner.Position
eof bool // have we just seen a newline? (line accounting is delayed until the next byte)
}
func scanInit(r io.Reader) *scan {
s := new(scan)
s.src = bufio.NewReader(r)
s.position.Line = 1
return s
}
// tokenText returns the next byte from the input
func (s *scan) tokenText() (byte, error) {
c, err := s.src.ReadByte()
if err != nil {
return c, err
}
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
if s.eof {
s.position.Line++
s.position.Column = 0
s.eof = false
}
if c == '\n' {
s.eof = true
return c, nil
}
s.position.Column++
return c, nil
}
package dns
import (
"sync"
)
// ServeMux is a DNS request multiplexer. It matches the zone name of
// each incoming request against a list of registered patterns and calls
// the handler for the pattern that most closely matches the zone name.
//
// ServeMux is DNSSEC aware, meaning that queries for the DS record are
// redirected to the parent zone (if that is also registered), otherwise
// the child gets the query.
//
// ServeMux is also safe for concurrent access from multiple goroutines.
//
// The zero ServeMux is empty and ready for use.
type ServeMux struct {
z map[string]Handler
m sync.RWMutex
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux {
return new(ServeMux)
}
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = NewServeMux()
func (mux *ServeMux) match(q string, t uint16) Handler {
mux.m.RLock()
defer mux.m.RUnlock()
if mux.z == nil {
return nil
}
q = CanonicalName(q)
var handler Handler
for off, end := 0, false; !end; off, end = NextLabel(q, off) {
if h, ok := mux.z[q[off:]]; ok {
if t != TypeDS {
return h
}
// Continue for DS to see if we have a parent too, if so delegate to the parent
handler = h
}
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := mux.z["."]; ok {
return h
}
return handler
}
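// exampleDSDelegation is an illustrative sketch, not part of the upstream
// library: it shows the DS special case handled by match above. With a child
// zone and its parent both registered, a DS query for the child is routed to
// the parent handler, while any other query type stays with the child. The
// zone names are placeholders.
func exampleDSDelegation(child, parent Handler) *ServeMux {
	mux := NewServeMux()
	mux.Handle("example.org.", child)
	mux.Handle("org.", parent)
	// mux.match("example.org.", TypeDS) -> parent
	// mux.match("example.org.", TypeA)  -> child
	return mux
}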
// Handle adds a handler to the ServeMux for pattern.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
if mux.z == nil {
mux.z = make(map[string]Handler)
}
mux.z[CanonicalName(pattern)] = handler
mux.m.Unlock()
}
// HandleFunc adds a handler function to the ServeMux for pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
mux.Handle(pattern, HandlerFunc(handler))
}
// HandleRemove deregisters the handler specific for pattern from the ServeMux.
func (mux *ServeMux) HandleRemove(pattern string) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
delete(mux.z, CanonicalName(pattern))
mux.m.Unlock()
}
// ServeDNS dispatches the request to the handler whose pattern most
// closely matches the request message.
//
// ServeDNS is DNSSEC aware, meaning that queries for the DS record
// are redirected to the parent zone (if that is also registered),
// otherwise the child gets the query.
//
// If no handler is found, or there is no question, a standard REFUSED
// message is returned
func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
var h Handler
if len(req.Question) >= 1 { // allow more than one question
h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
}
if h != nil {
h.ServeDNS(w, req)
} else {
handleRefused(w, req)
}
}
// Handle registers the handler with the given pattern
// in the DefaultServeMux. The documentation for
// ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleRemove deregisters the handle with the given pattern
// in the DefaultServeMux.
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
// HandleFunc registers the handler function with the given pattern
// in the DefaultServeMux.
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
DefaultServeMux.HandleFunc(pattern, handler)
}
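// exampleServeMuxUsage is an illustrative sketch, not part of the upstream
// library: it wires a HandlerFunc into a ServeMux and serves it over UDP.
// The zone name and listen address are placeholders; error handling is
// omitted for brevity.
func exampleServeMuxUsage() {
	mux := NewServeMux()
	mux.HandleFunc("example.org.", func(w ResponseWriter, r *Msg) {
		m := new(Msg)
		m.SetReply(r) // empty NOERROR answer
		w.WriteMsg(m)
	})
	srv := &Server{Addr: "127.0.0.1:8053", Net: "udp", Handler: mux}
	go srv.ListenAndServe()
	// ... later, stop the server:
	srv.Shutdown()
}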
......@@ -3,21 +3,40 @@
package dns
import (
"bytes"
"context"
"crypto/tls"
"encoding/binary"
"errors"
"io"
"net"
"strings"
"sync"
"time"
)
// Maximum number of TCP queries before we close the socket.
// Default maximum number of TCP queries before we close the socket.
const maxTCPQueries = 128
// aLongTimeAgo is a non-zero time, far in the past, used for
// immediate cancelation of network operations.
var aLongTimeAgo = time.Unix(1, 0)
// Handler is implemented by any value that implements ServeDNS.
type Handler interface {
ServeDNS(w ResponseWriter, r *Msg)
}
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as DNS handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Msg)
// ServeDNS calls f(w, r).
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
f(w, r)
}
// A ResponseWriter interface is used by a DNS handler to
// construct a DNS response.
type ResponseWriter interface {
......@@ -40,49 +59,35 @@ type ResponseWriter interface {
Hijack()
}
// A ConnectionStater interface is used by a DNS Handler to access TLS connection state
// when available.
type ConnectionStater interface {
ConnectionState() *tls.ConnectionState
}
type response struct {
closed bool // connection has been closed
hijacked bool // connection has been hijacked by handler
tsigStatus error
tsigTimersOnly bool
tsigStatus error
tsigRequestMAC string
tsigSecret map[string]string // the tsig secrets
udp *net.UDPConn // i/o connection if UDP was used
tcp *net.TCPConn // i/o connection if TCP was used
udpSession *SessionUDP // oob data to get egress interface right
remoteAddr net.Addr // address of the client
writer Writer // writer to output the raw DNS bits
}
// ServeMux is an DNS request multiplexer. It matches the
// zone name of each incoming request against a list of
// registered patterns add calls the handler for the pattern
// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning
// that queries for the DS record are redirected to the parent zone (if that
// is also registered), otherwise the child gets the query.
// ServeMux is also safe for concurrent access from multiple goroutines.
type ServeMux struct {
z map[string]Handler
m *sync.RWMutex
tsigProvider TsigProvider
udp net.PacketConn // i/o connection if UDP was used
tcp net.Conn // i/o connection if TCP was used
udpSession *SessionUDP // oob data to get egress interface right
pcSession net.Addr // address to use when writing to a generic net.PacketConn
writer Writer // writer to output the raw DNS bits
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} }
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = NewServeMux()
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as DNS handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Msg)
// ServeDNS calls f(w, r).
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
f(w, r)
// handleRefused answers every request it gets with a REFUSED message.
func handleRefused(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetRcode(r, RcodeRefused)
w.WriteMsg(m)
}
// HandleFailed answers every request it gets with a SERVFAIL message.
// Deprecated: This function is going away.
func HandleFailed(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetRcode(r, RcodeServerFailure)
......@@ -90,115 +95,42 @@ func HandleFailed(w ResponseWriter, r *Msg) {
w.WriteMsg(m)
}
func failedHandler() Handler { return HandlerFunc(HandleFailed) }
// ListenAndServe Starts a server on addresss and network speficied. Invoke handler
// ListenAndServe starts a server on the address and network specified. Invoke handler
// for incoming queries.
func ListenAndServe(addr string, network string, handler Handler) error {
server := &Server{Addr: addr, Net: network, Handler: handler}
return server.ListenAndServe()
}
// ActivateAndServe activates a server with a listener from systemd,
// l and p should not both be non-nil.
// If both l and p are not nil only p will be used.
// Invoke handler for incoming queries.
func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
server := &Server{Listener: l, PacketConn: p, Handler: handler}
return server.ActivateAndServe()
}
func (mux *ServeMux) match(q string, t uint16) Handler {
mux.m.RLock()
defer mux.m.RUnlock()
var handler Handler
b := make([]byte, len(q)) // worst case, one label of length q
off := 0
end := false
for {
l := len(q[off:])
for i := 0; i < l; i++ {
b[i] = q[off+i]
if b[i] >= 'A' && b[i] <= 'Z' {
b[i] |= ('a' - 'A')
}
}
if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
if t != TypeDS {
return h
}
// Continue for DS to see if we have a parent too, if so delegate to the parent
handler = h
}
off, end = NextLabel(q, off)
if end {
break
}
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := mux.z["."]; ok {
return h
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
// http://golang.org/pkg/net/http/#ListenAndServeTLS
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
return handler
}
// Handle adds a handler to the ServeMux for pattern.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
config := tls.Config{
Certificates: []tls.Certificate{cert},
}
mux.m.Lock()
mux.z[Fqdn(pattern)] = handler
mux.m.Unlock()
}
// HandleFunc adds a handler function to the ServeMux for pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
mux.Handle(pattern, HandlerFunc(handler))
}
// HandleRemove deregisters the handler specific for pattern from the ServeMux.
func (mux *ServeMux) HandleRemove(pattern string) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
server := &Server{
Addr: addr,
Net: "tcp-tls",
TLSConfig: &config,
Handler: handler,
}
mux.m.Lock()
delete(mux.z, Fqdn(pattern))
mux.m.Unlock()
}
// ServeDNS dispatches the request to the handler whose
// pattern most closely matches the request message. If DefaultServeMux
// is used the correct thing for DS queries is done: a possible parent
// is sought.
// If no handler is found a standard SERVFAIL message is returned
// If the request message does not have exactly one question in the
// question section a SERVFAIL is returned, unless Unsafe is true.
func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
var h Handler
if len(request.Question) < 1 { // allow more than one question
h = failedHandler()
} else {
if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
h = failedHandler()
}
}
h.ServeDNS(w, request)
return server.ListenAndServe()
}
// Handle registers the handler with the given pattern
// in the DefaultServeMux. The documentation for
// ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleRemove deregisters the handle with the given pattern
// in the DefaultServeMux.
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
// HandleFunc registers the handler function with the given pattern
// in the DefaultServeMux.
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
DefaultServeMux.HandleFunc(pattern, handler)
// ActivateAndServe activates a server with a listener from systemd,
// l and p should not both be non-nil.
// If both l and p are not nil only p will be used.
// Invoke handler for incoming queries.
func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
server := &Server{Listener: l, PacketConn: p, Handler: handler}
return server.ActivateAndServe()
}
// Writer writes raw DNS messages; each call to Write should send an entire message.
......@@ -210,28 +142,46 @@ type Writer interface {
type Reader interface {
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error)
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
}
// defaultReader is an adapter for the Server struct that implements the Reader interface
// using the readTCP and readUDP func of the embedded Server.
// PacketConnReader is an optional interface that Readers can implement to support using generic net.PacketConns.
type PacketConnReader interface {
Reader
// ReadPacketConn reads a raw message from a generic net.PacketConn UDP connection. Implementations may
// alter connection properties, for example the read-deadline.
ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error)
}
// defaultReader is an adapter for the Server struct that implements the Reader and
// PacketConnReader interfaces using the readTCP, readUDP and readPacketConn funcs
// of the embedded Server.
type defaultReader struct {
*Server
}
func (dr *defaultReader) ReadTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) {
var _ PacketConnReader = defaultReader{}
func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
return dr.readTCP(conn, timeout)
}
func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
return dr.readUDP(conn, timeout)
}
func (dr defaultReader) ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) {
return dr.readPacketConn(conn, timeout)
}
// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
// Implementations should never return a nil Reader.
// Readers should also implement the optional PacketConnReader interface.
// PacketConnReader is required to use a generic net.PacketConn.
type DecorateReader func(Reader) Reader
// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
......@@ -242,10 +192,12 @@ type DecorateWriter func(Writer) Writer
type Server struct {
// Address to listen on, ":dns" if empty.
Addr string
// if "tcp" it will invoke a TCP listener, otherwise an UDP one.
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
Net string
// TCP Listener to use, this is to aid in systemd's socket activation.
Listener net.Listener
// TLS connection configuration
TLSConfig *tls.Config
// UDP "Listener" to use, this is to aid in systemd's socket activation.
PacketConn net.PacketConn
// Handler to invoke, dns.DefaultServeMux if nil.
......@@ -259,393 +211,524 @@ type Server struct {
WriteTimeout time.Duration
// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
IdleTimeout func() time.Duration
// Secret(s) for Tsig map[<zonename>]<base64 secret>.
// An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
TsigProvider TsigProvider
// Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
TsigSecret map[string]string
// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
// the handler. It will specifically not check if the query has the QR bit not set.
Unsafe bool
// If NotifyStartedFunc is set it is called once the server has started listening.
NotifyStartedFunc func()
// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
DecorateReader DecorateReader
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
DecorateWriter DecorateWriter
// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
MaxTCPQueries int
// Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
// It is only supported on go1.11+ and when using ListenAndServe.
ReusePort bool
// AcceptMsgFunc will check the incoming message and will reject it early in the process.
// By default DefaultMsgAcceptFunc will be used.
MsgAcceptFunc MsgAcceptFunc
// Shutdown handling
lock sync.RWMutex
started bool
shutdown chan struct{}
conns map[net.Conn]struct{}
// A pool for UDP message buffers.
udpPool sync.Pool
}
func (srv *Server) tsigProvider() TsigProvider {
if srv.TsigProvider != nil {
return srv.TsigProvider
}
if srv.TsigSecret != nil {
return tsigSecretProvider(srv.TsigSecret)
}
return nil
}
func (srv *Server) isStarted() bool {
srv.lock.RLock()
started := srv.started
srv.lock.RUnlock()
return started
}
func makeUDPBuffer(size int) func() interface{} {
return func() interface{} {
return make([]byte, size)
}
}
func (srv *Server) init() {
srv.shutdown = make(chan struct{})
srv.conns = make(map[net.Conn]struct{})
if srv.UDPSize == 0 {
srv.UDPSize = MinMsgSize
}
if srv.MsgAcceptFunc == nil {
srv.MsgAcceptFunc = DefaultMsgAcceptFunc
}
if srv.Handler == nil {
srv.Handler = DefaultServeMux
}
// For graceful shutdown.
stopUDP chan bool
stopTCP chan bool
wgUDP sync.WaitGroup
wgTCP sync.WaitGroup
srv.udpPool.New = makeUDPBuffer(srv.UDPSize)
}
// make start/shutdown not racy
lock sync.Mutex
started bool
func unlockOnce(l sync.Locker) func() {
var once sync.Once
return func() { once.Do(l.Unlock) }
}
// ListenAndServe starts a nameserver on the configured address in *Server.
func (srv *Server) ListenAndServe() error {
unlock := unlockOnce(&srv.lock)
srv.lock.Lock()
// We can't use defer because serveTCP/serveUDP don't return.
defer unlock()
if srv.started {
srv.lock.Unlock()
return &Error{err: "server already started"}
}
srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
srv.started = true
addr := srv.Addr
if addr == "" {
addr = ":domain"
}
if srv.UDPSize == 0 {
srv.UDPSize = MinMsgSize
}
srv.init()
switch srv.Net {
case "tcp", "tcp4", "tcp6":
a, e := net.ResolveTCPAddr(srv.Net, addr)
if e != nil {
srv.lock.Unlock()
srv.started = false
return e
l, err := listenTCP(srv.Net, addr, srv.ReusePort)
if err != nil {
return err
}
l, e := net.ListenTCP(srv.Net, a)
if e != nil {
srv.lock.Unlock()
srv.started = false
return e
srv.Listener = l
srv.started = true
unlock()
return srv.serveTCP(l)
case "tcp-tls", "tcp4-tls", "tcp6-tls":
if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
return errors.New("dns: neither Certificates nor GetCertificate set in Config")
}
network := strings.TrimSuffix(srv.Net, "-tls")
l, err := listenTCP(network, addr, srv.ReusePort)
if err != nil {
return err
}
l = tls.NewListener(l, srv.TLSConfig)
srv.Listener = l
srv.lock.Unlock()
srv.started = true
unlock()
return srv.serveTCP(l)
case "udp", "udp4", "udp6":
a, e := net.ResolveUDPAddr(srv.Net, addr)
if e != nil {
srv.lock.Unlock()
srv.started = false
return e
}
l, e := net.ListenUDP(srv.Net, a)
if e != nil {
srv.lock.Unlock()
srv.started = false
return e
l, err := listenUDP(srv.Net, addr, srv.ReusePort)
if err != nil {
return err
}
if e := setUDPSocketOptions(l); e != nil {
srv.lock.Unlock()
srv.started = false
u := l.(*net.UDPConn)
if e := setUDPSocketOptions(u); e != nil {
u.Close()
return e
}
srv.PacketConn = l
srv.lock.Unlock()
return srv.serveUDP(l)
srv.started = true
unlock()
return srv.serveUDP(u)
}
srv.lock.Unlock()
srv.started = false
return &Error{err: "bad network"}
}
// ActivateAndServe starts a nameserver with the PacketConn or Listener
// configured in *Server. Its main use is to start a server from systemd.
func (srv *Server) ActivateAndServe() error {
unlock := unlockOnce(&srv.lock)
srv.lock.Lock()
defer unlock()
if srv.started {
srv.lock.Unlock()
return &Error{err: "server already started"}
}
srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
srv.started = true
pConn := srv.PacketConn
l := srv.Listener
if pConn != nil {
if srv.UDPSize == 0 {
srv.UDPSize = MinMsgSize
}
if t, ok := pConn.(*net.UDPConn); ok {
srv.init()
if srv.PacketConn != nil {
// Check that the PacketConn's concrete type is *net.UDPConn and that
// its value is not nil before tuning UDP socket options.
if t, ok := srv.PacketConn.(*net.UDPConn); ok && t != nil {
if e := setUDPSocketOptions(t); e != nil {
srv.lock.Unlock()
srv.started = false
return e
}
srv.lock.Unlock()
return srv.serveUDP(t)
}
srv.started = true
unlock()
return srv.serveUDP(srv.PacketConn)
}
if l != nil {
if t, ok := l.(*net.TCPListener); ok {
srv.lock.Unlock()
return srv.serveTCP(t)
}
if srv.Listener != nil {
srv.started = true
unlock()
return srv.serveTCP(srv.Listener)
}
srv.lock.Unlock()
srv.started = false
return &Error{err: "bad listeners"}
}
// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and
// ActivateAndServe will return. All in progress queries are completed before the server
// is taken down. If the Shutdown is taking longer than the reading timeout an error
// is returned.
// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
// ActivateAndServe will return.
func (srv *Server) Shutdown() error {
return srv.ShutdownContext(context.Background())
}
// ShutdownContext shuts down a server. After a call to ShutdownContext,
// ListenAndServe and ActivateAndServe will return.
//
// A context.Context may be passed to limit how long to wait for connections
// to terminate.
func (srv *Server) ShutdownContext(ctx context.Context) error {
srv.lock.Lock()
if !srv.started {
srv.lock.Unlock()
return &Error{err: "server not started"}
}
srv.started = false
net, addr := srv.Net, srv.Addr
switch {
case srv.Listener != nil:
a := srv.Listener.Addr()
net, addr = a.Network(), a.String()
case srv.PacketConn != nil:
a := srv.PacketConn.LocalAddr()
net, addr = a.Network(), a.String()
if srv.PacketConn != nil {
srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
srv.lock.Unlock()
fin := make(chan bool)
switch net {
case "tcp", "tcp4", "tcp6":
go func() {
srv.stopTCP <- true
srv.wgTCP.Wait()
fin <- true
}()
if srv.Listener != nil {
srv.Listener.Close()
}
case "udp", "udp4", "udp6":
go func() {
srv.stopUDP <- true
srv.wgUDP.Wait()
fin <- true
}()
for rw := range srv.conns {
rw.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
c := &Client{Net: net}
go c.Exchange(new(Msg), addr) // extra query to help ReadXXX loop to pass
srv.lock.Unlock()
if testShutdownNotify != nil {
testShutdownNotify.Broadcast()
}
var ctxErr error
select {
case <-time.After(srv.getReadTimeout()):
return &Error{err: "server shutdown is pending"}
case <-fin:
return nil
case <-srv.shutdown:
case <-ctx.Done():
ctxErr = ctx.Err()
}
if srv.PacketConn != nil {
srv.PacketConn.Close()
}
return ctxErr
}
var testShutdownNotify *sync.Cond
// getReadTimeout is a helper func to use system timeout if server did not intend to change it.
func (srv *Server) getReadTimeout() time.Duration {
rtimeout := dnsTimeout
if srv.ReadTimeout != 0 {
rtimeout = srv.ReadTimeout
return srv.ReadTimeout
}
return rtimeout
return dnsTimeout
}
// serveTCP starts a TCP listener for the server.
// Each request is handled in a separate goroutine.
func (srv *Server) serveTCP(l *net.TCPListener) error {
func (srv *Server) serveTCP(l net.Listener) error {
defer l.Close()
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
reader := Reader(&defaultReader{srv})
var wg sync.WaitGroup
defer func() {
wg.Wait()
close(srv.shutdown)
}()
for srv.isStarted() {
rw, err := l.Accept()
if err != nil {
if !srv.isStarted() {
return nil
}
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
continue
}
return err
}
srv.lock.Lock()
// Track the connection to allow unblocking reads on shutdown.
srv.conns[rw] = struct{}{}
srv.lock.Unlock()
wg.Add(1)
go srv.serveTCPConn(&wg, rw)
}
return nil
}
// serveUDP starts a UDP listener for the server.
func (srv *Server) serveUDP(l net.PacketConn) error {
defer l.Close()
reader := Reader(defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
handler := srv.Handler
if handler == nil {
handler = DefaultServeMux
lUDP, isUDP := l.(*net.UDPConn)
readerPC, canPacketConn := reader.(PacketConnReader)
if !isUDP && !canPacketConn {
return &Error{err: "PacketConnReader was not implemented on Reader returned from DecorateReader but is required for net.PacketConn"}
}
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
var wg sync.WaitGroup
defer func() {
wg.Wait()
close(srv.shutdown)
}()
rtimeout := srv.getReadTimeout()
// deadline is not used here
for {
rw, e := l.AcceptTCP()
if e != nil {
continue
for srv.isStarted() {
var (
m []byte
sPC net.Addr
sUDP *SessionUDP
err error
)
if isUDP {
m, sUDP, err = reader.ReadUDP(lUDP, rtimeout)
} else {
m, sPC, err = readerPC.ReadPacketConn(l, rtimeout)
}
m, e := reader.ReadTCP(rw, rtimeout)
select {
case <-srv.stopTCP:
return nil
default:
if err != nil {
if !srv.isStarted() {
return nil
}
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
continue
}
return err
}
if e != nil {
if len(m) < headerSize {
if cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
continue
}
srv.wgTCP.Add(1)
go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
wg.Add(1)
go srv.serveUDPPacket(&wg, m, l, sUDP, sPC)
}
}
// serveUDP starts a UDP listener for the server.
// Each request is handled in a separate goroutine.
func (srv *Server) serveUDP(l *net.UDPConn) error {
defer l.Close()
return nil
}
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
// Serve a new TCP connection.
func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {
w := &response{tsigProvider: srv.tsigProvider(), tcp: rw}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
reader := Reader(&defaultReader{srv})
reader := Reader(defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
handler := srv.Handler
if handler == nil {
handler = DefaultServeMux
idleTimeout := tcpIdleTimeout
if srv.IdleTimeout != nil {
idleTimeout = srv.IdleTimeout()
}
timeout := srv.getReadTimeout()
limit := srv.MaxTCPQueries
if limit == 0 {
limit = maxTCPQueries
}
rtimeout := srv.getReadTimeout()
// deadline is not used here
for {
m, s, e := reader.ReadUDP(l, rtimeout)
select {
case <-srv.stopUDP:
return nil
default:
for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ {
m, err := reader.ReadTCP(w.tcp, timeout)
if err != nil {
// TODO(tmthrgd): handle error
break
}
if e != nil {
continue
srv.serveDNS(m, w)
if w.closed {
break // Close() was called
}
if w.hijacked {
break // client will call Close() themselves
}
srv.wgUDP.Add(1)
go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
// The first read uses the read timeout, the rest use the
// idle timeout.
timeout = idleTimeout
}
if !w.hijacked {
w.Close()
}
srv.lock.Lock()
delete(srv.conns, w.tcp)
srv.lock.Unlock()
wg.Done()
}
// Serve a new connection.
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t *net.TCPConn) {
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
// Serve a new UDP request.
func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u net.PacketConn, udpSession *SessionUDP, pcSession net.Addr) {
w := &response{tsigProvider: srv.tsigProvider(), udp: u, udpSession: udpSession, pcSession: pcSession}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
q := 0 // counter for the amount of TCP queries we get
srv.serveDNS(m, w)
wg.Done()
}
defer func() {
if u != nil {
srv.wgUDP.Done()
func (srv *Server) serveDNS(m []byte, w *response) {
dh, off, err := unpackMsgHdr(m, 0)
if err != nil {
// Let client hang, they are sending crap; any reply can be used to amplify.
return
}
req := new(Msg)
req.setHdr(dh)
switch action := srv.MsgAcceptFunc(dh); action {
case MsgAccept:
if req.unpack(dh, m, off) == nil {
break
}
if t != nil {
srv.wgTCP.Done()
fallthrough
case MsgReject, MsgRejectNotImplemented:
opcode := req.Opcode
req.SetRcodeFormatError(req)
req.Zero = false
if action == MsgRejectNotImplemented {
req.Opcode = opcode
req.Rcode = RcodeNotImplemented
}
}()
reader := Reader(&defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
Redo:
req := new(Msg)
err := req.Unpack(m)
if err != nil { // Send a FormatError back
x := new(Msg)
x.SetRcodeFormatError(req)
w.WriteMsg(x)
goto Exit
}
if !srv.Unsafe && req.Response {
goto Exit
// Are we allowed to delete any OPT records here?
req.Ns, req.Answer, req.Extra = nil, nil, nil
w.WriteMsg(req)
fallthrough
case MsgIgnore:
if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
return
}
w.tsigStatus = nil
if w.tsigSecret != nil {
if w.tsigProvider != nil {
if t := req.IsTsig(); t != nil {
secret := t.Hdr.Name
if _, ok := w.tsigSecret[secret]; !ok {
w.tsigStatus = ErrKeyAlg
}
w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false)
w.tsigStatus = TsigVerifyWithProvider(m, w.tsigProvider, "", false)
w.tsigTimersOnly = false
w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
w.tsigRequestMAC = t.MAC
}
}
h.ServeDNS(w, req) // Writes back to the client
Exit:
// TODO(miek): make this number configurable?
if q > maxTCPQueries { // close socket after this many queries
w.Close()
return
if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
if w.hijacked {
return // client calls Close()
}
if u != nil { // UDP, "close" and return
w.Close()
return
}
idleTimeout := tcpIdleTimeout
if srv.IdleTimeout != nil {
idleTimeout = srv.IdleTimeout()
}
m, e := reader.ReadTCP(w.tcp, idleTimeout)
if e == nil {
q++
goto Redo
}
w.Close()
return
srv.Handler.ServeDNS(w, req) // Writes back to the client
}
func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) {
conn.SetReadDeadline(time.Now().Add(timeout))
l := make([]byte, 2)
n, err := conn.Read(l)
if err != nil || n != 2 {
if err != nil {
return nil, err
}
return nil, ErrShortRead
}
length, _ := unpackUint16(l, 0)
if length == 0 {
return nil, ErrShortRead
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
// If we race with ShutdownContext, the read deadline may
// have been set in the distant past to unblock the read
// below. We must not override it, otherwise we may block
// ShutdownContext.
srv.lock.RLock()
if srv.started {
conn.SetReadDeadline(time.Now().Add(timeout))
}
m := make([]byte, int(length))
n, err = conn.Read(m[:int(length)])
if err != nil || n == 0 {
if err != nil {
return nil, err
}
return nil, ErrShortRead
srv.lock.RUnlock()
var length uint16
if err := binary.Read(conn, binary.BigEndian, &length); err != nil {
return nil, err
}
i := n
for i < int(length) {
j, err := conn.Read(m[i:int(length)])
if err != nil {
return nil, err
}
i += j
m := make([]byte, length)
if _, err := io.ReadFull(conn, m); err != nil {
return nil, err
}
n = i
m = m[:n]
return m, nil
}
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
conn.SetReadDeadline(time.Now().Add(timeout))
m := make([]byte, srv.UDPSize)
n, s, e := ReadFromSessionUDP(conn, m)
if e != nil || n == 0 {
if e != nil {
return nil, nil, e
}
return nil, nil, ErrShortRead
srv.lock.RLock()
if srv.started {
// See the comment in readTCP above.
conn.SetReadDeadline(time.Now().Add(timeout))
}
srv.lock.RUnlock()
m := srv.udpPool.Get().([]byte)
n, s, err := ReadFromSessionUDP(conn, m)
if err != nil {
srv.udpPool.Put(m)
return nil, nil, err
}
m = m[:n]
return m, s, nil
}
func (srv *Server) readPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) {
srv.lock.RLock()
if srv.started {
// See the comment in readTCP above.
conn.SetReadDeadline(time.Now().Add(timeout))
}
srv.lock.RUnlock()
m := srv.udpPool.Get().([]byte)
n, addr, err := conn.ReadFrom(m)
if err != nil {
srv.udpPool.Put(m)
return nil, nil, err
}
m = m[:n]
return m, addr, nil
}
// WriteMsg implements the ResponseWriter.WriteMsg method.
func (w *response) WriteMsg(m *Msg) (err error) {
if w.closed {
return &Error{err: "WriteMsg called after Close"}
}
var data []byte
if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check)
if w.tsigProvider != nil { // if no provider, dont check for the tsig (which is a longer check)
if t := m.IsTsig(); t != nil {
data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
data, w.tsigRequestMAC, err = TsigGenerateWithProvider(m, w.tsigProvider, w.tsigRequestMAC, w.tsigTimersOnly)
if err != nil {
return err
}
......@@ -663,38 +746,55 @@ func (w *response) WriteMsg(m *Msg) (err error) {
// Write implements the ResponseWriter.Write method.
func (w *response) Write(m []byte) (int, error) {
if w.closed {
return 0, &Error{err: "Write called after Close"}
}
switch {
case w.udp != nil:
n, err := WriteToSessionUDP(w.udp, m, w.udpSession)
return n, err
case w.tcp != nil:
lm := len(m)
if lm < 2 {
return 0, io.ErrShortBuffer
if u, ok := w.udp.(*net.UDPConn); ok {
return WriteToSessionUDP(u, m, w.udpSession)
}
if lm > MaxMsgSize {
return w.udp.WriteTo(m, w.pcSession)
case w.tcp != nil:
if len(m) > MaxMsgSize {
return 0, &Error{err: "message too large"}
}
l := make([]byte, 2, 2+lm)
l[0], l[1] = packUint16(uint16(lm))
m = append(l, m...)
n, err := io.Copy(w.tcp, bytes.NewReader(m))
return int(n), err
msg := make([]byte, 2+len(m))
binary.BigEndian.PutUint16(msg, uint16(len(m)))
copy(msg[2:], m)
return w.tcp.Write(msg)
default:
panic("dns: internal error: udp and tcp both nil")
}
panic("not reached")
}
// LocalAddr implements the ResponseWriter.LocalAddr method.
func (w *response) LocalAddr() net.Addr {
if w.tcp != nil {
switch {
case w.udp != nil:
return w.udp.LocalAddr()
case w.tcp != nil:
return w.tcp.LocalAddr()
default:
panic("dns: internal error: udp and tcp both nil")
}
return w.udp.LocalAddr()
}
// RemoteAddr implements the ResponseWriter.RemoteAddr method.
func (w *response) RemoteAddr() net.Addr { return w.remoteAddr }
func (w *response) RemoteAddr() net.Addr {
switch {
case w.udpSession != nil:
return w.udpSession.RemoteAddr()
case w.pcSession != nil:
return w.pcSession
case w.tcp != nil:
return w.tcp.RemoteAddr()
default:
panic("dns: internal error: udpSession, pcSession and tcp are all nil")
}
}
// TsigStatus implements the ResponseWriter.TsigStatus method.
func (w *response) TsigStatus() error { return w.tsigStatus }
......@@ -707,11 +807,30 @@ func (w *response) Hijack() { w.hijacked = true }
// Close implements the ResponseWriter.Close method
func (w *response) Close() error {
// Can't close the udp conn, as that is actually the listener.
if w.tcp != nil {
e := w.tcp.Close()
w.tcp = nil
return e
if w.closed {
return &Error{err: "connection already closed"}
}
w.closed = true
switch {
case w.udp != nil:
// Can't close the udp conn, as that is actually the listener.
return nil
case w.tcp != nil:
return w.tcp.Close()
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// ConnectionState() implements the ConnectionStater.ConnectionState() method.
func (w *response) ConnectionState() *tls.ConnectionState {
type tlsConnectionStater interface {
ConnectionState() tls.ConnectionState
}
if v, ok := w.tcp.(tlsConnectionStater); ok {
t := v.ConnectionState()
return &t
}
return nil
}