// +build fuzz
package dns
import "strings"
func Fuzz(data []byte) int {
msg := new(Msg)
if err := msg.Unpack(data); err != nil {
return 0
}
if _, err := msg.Pack(); err != nil {
return 0
}
return 1
}
func FuzzNewRR(data []byte) int {
str := string(data)
// Do not fuzz lines that include the $INCLUDE keyword, and hint the fuzzer
// to avoid them.
// See GH#1025 for context.
if strings.Contains(strings.ToUpper(str), "$INCLUDE") {
return -1
}
if _, err := NewRR(str); err != nil {
return 0
}
return 1
}
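// Illustrative sketch (added; not part of the original file): the text round
// trip that FuzzNewRR exercises. exampleParseRR is a hypothetical helper;
// NewRR and RR.String come from elsewhere in this package.
func exampleParseRR() (string, error) {
    rr, err := NewRR("example.org. 3600 IN A 192.0.2.1")
    if err != nil {
        return "", err
    }
    // String renders the record back into presentation format.
    return rr.String(), nil
}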
package dns
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
// Parse the $GENERATE statement as used in BIND9 zones.
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
// We are called after '$GENERATE '. After which we expect:
// * the range (12-24/2)
// * lhs (ownername)
// * [[ttl][class]]
// * type
// * rhs (rdata)
// But we are lazy here: only the range is parsed; *all* occurrences
// of $ after that are interpreted.
func (zp *ZoneParser) generate(l lex) (RR, bool) {
token := l.token
step := int64(1)
if i := strings.IndexByte(token, '/'); i >= 0 {
if i+1 == len(token) {
return zp.setParseError("bad step in $GENERATE range", l)
}
s, err := strconv.ParseInt(token[i+1:], 10, 64)
if err != nil || s <= 0 {
return zp.setParseError("bad step in $GENERATE range", l)
}
step = s
token = token[:i]
}
sx := strings.SplitN(token, "-", 2)
if len(sx) != 2 {
return zp.setParseError("bad start-stop in $GENERATE range", l)
}
start, err := strconv.ParseInt(sx[0], 10, 64)
if err != nil {
return zp.setParseError("bad start in $GENERATE range", l)
}
end, err := strconv.ParseInt(sx[1], 10, 64)
if err != nil {
return zp.setParseError("bad stop in $GENERATE range", l)
}
if end < 0 || start < 0 || end < start || (end-start)/step > 65535 {
return zp.setParseError("bad range in $GENERATE range", l)
}
// _BLANK
l, ok := zp.c.Next()
if !ok || l.value != zBlank {
return zp.setParseError("garbage after $GENERATE range", l)
}
// Create a completely new string, which we then parse again.
var s string
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
if l.err {
return zp.setParseError("bad data in $GENERATE directive", l)
}
if l.value == zNewline {
break
}
s += l.token
}
r := &generateReader{
s: s,
cur: start,
start: start,
end: end,
step: step,
file: zp.file,
lex: &l,
}
zp.sub = NewZoneParser(r, zp.origin, zp.file)
zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
zp.sub.generateDisallowed = true
zp.sub.SetDefaultTTL(defaultTtl)
return zp.subNext()
}
type generateReader struct {
s string
si int
cur int64
start int64
end int64
step int64
mod bytes.Buffer
escape bool
eof bool
file string
lex *lex
}
func (r *generateReader) parseError(msg string, end int) *ParseError {
r.eof = true // Make errors sticky.
l := *r.lex
l.token = r.s[r.si-1 : end]
l.column += r.si // l.column starts one zBLANK before r.s
return &ParseError{r.file, msg, l}
}
func (r *generateReader) Read(p []byte) (int, error) {
// NewZLexer, through NewZoneParser, should use ReadByte and
// not end up here.
panic("not implemented")
}
func (r *generateReader) ReadByte() (byte, error) {
if r.eof {
return 0, io.EOF
}
if r.mod.Len() > 0 {
return r.mod.ReadByte()
}
if r.si >= len(r.s) {
r.si = 0
r.cur += r.step
r.eof = r.cur > r.end || r.cur < 0
return '\n', nil
}
si := r.si
r.si++
switch r.s[si] {
case '\\':
if r.escape {
r.escape = false
return '\\', nil
}
r.escape = true
return r.ReadByte()
case '$':
if r.escape {
r.escape = false
return '$', nil
}
mod := "%d"
if si >= len(r.s)-1 {
// End of the string
fmt.Fprintf(&r.mod, mod, r.cur)
return r.mod.ReadByte()
}
if r.s[si+1] == '$' {
r.si++
return '$', nil
}
var offset int64
// Search for { and }
if r.s[si+1] == '{' {
// Modifier block
sep := strings.Index(r.s[si+2:], "}")
if sep < 0 {
return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
}
var errMsg string
mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
if errMsg != "" {
return 0, r.parseError(errMsg, si+3+sep)
}
if r.start+offset < 0 || r.end+offset > 1<<31-1 {
return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
}
r.si += 2 + sep // Jump to it
}
fmt.Fprintf(&r.mod, mod, r.cur+offset)
return r.mod.ReadByte()
default:
if r.escape { // Pretty useless here
r.escape = false
return r.ReadByte()
}
return r.s[si], nil
}
}
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
func modToPrintf(s string) (string, int64, string) {
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
// values for the optional width and base, if necessary.
var offStr, widthStr, base string
switch xs := strings.Split(s, ","); len(xs) {
case 1:
offStr, widthStr, base = xs[0], "0", "d"
case 2:
offStr, widthStr, base = xs[0], xs[1], "d"
case 3:
offStr, widthStr, base = xs[0], xs[1], xs[2]
default:
return "", 0, "bad modifier in $GENERATE"
}
switch base {
case "o", "d", "x", "X":
default:
return "", 0, "bad base in $GENERATE"
}
offset, err := strconv.ParseInt(offStr, 10, 64)
if err != nil {
return "", 0, "bad offset in $GENERATE"
}
width, err := strconv.ParseInt(widthStr, 10, 64)
if err != nil || width < 0 || width > 255 {
return "", 0, "bad width in $GENERATE"
}
if width == 0 {
return "%" + base, offset, ""
}
return "%0" + widthStr + base, offset, ""
}
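// Illustrative sketch (added; not part of the original file): expanding a
// $GENERATE directive through the public ZoneParser API defined elsewhere in
// this package. exampleGenerate is a hypothetical helper; with the directive
// below it should yield host-1.example.org. through host-3.example.org.,
// each with an A record 10.0.0.<n>.
func exampleGenerate() ([]RR, error) {
    const zone = "$GENERATE 1-3 host-$ A 10.0.0.$\n"
    zp := NewZoneParser(strings.NewReader(zone), "example.org.", "")
    var rrs []RR
    for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
        rrs = append(rrs, rr)
    }
    return rrs, zp.Err()
}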
package dns
import (
"bytes"
"crypto"
"hash"
)
// identityHash does not hash; it only buffers the data written into it and returns it as-is.
type identityHash struct {
b *bytes.Buffer
}
// Implement the hash.Hash interface.
func (i identityHash) Write(b []byte) (int, error) { return i.b.Write(b) }
func (i identityHash) Size() int { return i.b.Len() }
func (i identityHash) BlockSize() int { return 1024 }
func (i identityHash) Reset() { i.b.Reset() }
func (i identityHash) Sum(b []byte) []byte { return append(b, i.b.Bytes()...) }
func hashFromAlgorithm(alg uint8) (hash.Hash, crypto.Hash, error) {
hashnumber, ok := AlgorithmToHash[alg]
if !ok {
return nil, 0, ErrAlg
}
if hashnumber == 0 {
return identityHash{b: &bytes.Buffer{}}, hashnumber, nil
}
return hashnumber.New(), hashnumber, nil
}
package dns
// Holds a bunch of helper functions for dealing with labels.
// SplitDomainName splits a name string into its labels.
// www.miek.nl. returns []string{"www", "miek", "nl"}
// .www.miek.nl. returns []string{"", "www", "miek", "nl"}.
// The root label (.) returns nil. Note that using
// strings.Split(s) will work in most cases, but does not handle
// escaped dots (\.) for instance.
// s must be a syntactically valid domain name, see IsDomainName.
func SplitDomainName(s string) (labels []string) {
if s == "" {
return nil
}
fqdnEnd := 0 // offset of the final '.' or the length of the name
idx := Split(s)
begin := 0
if IsFqdn(s) {
fqdnEnd = len(s) - 1
} else {
fqdnEnd = len(s)
}
switch len(idx) {
case 0:
return nil
case 1:
// no-op
default:
for _, end := range idx[1:] {
labels = append(labels, s[begin:end-1])
begin = end
}
}
return append(labels, s[begin:fqdnEnd])
}
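// Illustrative sketch (added; exampleSplit is a hypothetical helper):
// SplitDomainName on a fully qualified and a relative name; both yield the
// same labels, matching the examples in the comment above.
func exampleSplit() ([]string, []string) {
    fqdn := SplitDomainName("www.miek.nl.") // []string{"www", "miek", "nl"}
    rel := SplitDomainName("www.miek.nl")   // []string{"www", "miek", "nl"}
    return fqdn, rel
}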
// CompareDomainName compares the names s1 and s2 and
// returns how many labels they have in common starting from the *right*.
// The comparison stops at the first inequality. The names are downcased
// before the comparison.
//
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
// www.miek.nl. and www.bla.nl. have one label in common: nl
//
// s1 and s2 must be syntactically valid domain names.
func CompareDomainName(s1, s2 string) (n int) {
// the first check: root label
if s1 == "." || s2 == "." {
return 0
}
l1 := Split(s1)
l2 := Split(s2)
j1 := len(l1) - 1 // end
i1 := len(l1) - 2 // start
j2 := len(l2) - 1
i2 := len(l2) - 2
// the second check can be done here: last/only label
// before we fall through into the for-loop below
if equal(s1[l1[j1]:], s2[l2[j2]:]) {
n++
} else {
return
}
for {
if i1 < 0 || i2 < 0 {
break
}
if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
n++
} else {
break
}
j1--
i1--
j2--
i2--
}
return
}
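// Illustrative sketch (added; exampleCompare is a hypothetical helper): the
// two comparisons documented in the comment above.
func exampleCompare() (int, int) {
    two := CompareDomainName("www.miek.nl.", "miek.nl.")    // 2: "miek" and "nl"
    one := CompareDomainName("www.miek.nl.", "www.bla.nl.") // 1: "nl"
    return two, one
}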
// CountLabel counts the number of labels in the string s.
// s must be a syntactically valid domain name.
func CountLabel(s string) (labels int) {
if s == "." {
return
}
off := 0
end := false
for {
off, end = NextLabel(s, off)
labels++
if end {
return
}
}
}
// Split splits a name s into its label indexes.
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
// The root name (.) returns nil. Also see SplitDomainName.
// s must be a syntactically valid domain name.
func Split(s string) []int {
if s == "." {
return nil
}
idx := make([]int, 1, 3)
off := 0
end := false
for {
off, end = NextLabel(s, off)
if end {
return idx
}
idx = append(idx, off)
}
}
// NextLabel returns the index of the start of the next label in the
// string s starting at offset.
// The bool end is true when the end of the string has been reached.
// Also see PrevLabel.
func NextLabel(s string, offset int) (i int, end bool) {
if s == "" {
return 0, true
}
for i = offset; i < len(s)-1; i++ {
if s[i] != '.' {
continue
}
j := i - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-i)%2 == 0 {
continue
}
return i + 1, false
}
return i + 1, true
}
// PrevLabel returns the index of the label when starting from the right and
// jumping n labels to the left.
// The bool start is true when the start of the string has been overshot.
// Also see NextLabel.
func PrevLabel(s string, n int) (i int, start bool) {
if s == "" {
return 0, true
}
if n == 0 {
return len(s), false
}
l := len(s) - 1
if s[l] == '.' {
l--
}
for ; l >= 0 && n > 0; l-- {
if s[l] != '.' {
continue
}
j := l - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-l)%2 == 0 {
continue
}
n--
if n == 0 {
return l + 1, false
}
}
return 0, n > 1
}
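// Illustrative sketch (added; lastNLabels is a hypothetical helper): taking
// the last n labels of a name with PrevLabel. For
// lastNLabels("www.miek.nl.", 2), PrevLabel returns offset 4, so the result
// is "miek.nl.".
func lastNLabels(s string, n int) string {
    off, start := PrevLabel(s, n)
    if start {
        // The start of the name was overshot: return the whole name.
        return s
    }
    return s[off:]
}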
// equal compares a and b while ignoring case. It returns true when equal otherwise false.
func equal(a, b string) bool {
// might be lifted into API function.
la := len(a)
lb := len(b)
if la != lb {
return false
}
for i := la - 1; i >= 0; i-- {
ai := a[i]
bi := b[i]
if ai >= 'A' && ai <= 'Z' {
ai |= 'a' - 'A'
}
if bi >= 'A' && bi <= 'Z' {
bi |= 'a' - 'A'
}
if ai != bi {
return false
}
}
return true
}
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package dns
import "net"
const supportsReusePort = false
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.Listen(network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.ListenPacket(network, addr)
}
// +build go1.11
// +build aix darwin dragonfly freebsd linux netbsd openbsd
package dns
import (
"context"
"net"
"syscall"
"golang.org/x/sys/unix"
)
const supportsReusePort = true
func reuseportControl(network, address string, c syscall.RawConn) error {
var opErr error
err := c.Control(func(fd uintptr) {
opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
})
if err != nil {
return err
}
return opErr
}
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.Listen(context.Background(), network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.ListenPacket(context.Background(), network, addr)
}
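// Illustrative sketch (added; exampleListeners is a hypothetical helper): how
// the build-selected listenTCP/listenUDP helpers above are meant to be used.
// On platforms where supportsReusePort is false the reuseport request is
// silently ignored.
func exampleListeners(addr string) (net.Listener, net.PacketConn, error) {
    l, err := listenTCP("tcp", addr, supportsReusePort)
    if err != nil {
        return nil, nil, err
    }
    p, err := listenUDP("udp", addr, supportsReusePort)
    if err != nil {
        l.Close()
        return nil, nil, err
    }
    return l, p, nil
}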
// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
// and to - Pack() - wire format.
// All the packers and unpackers take a (msg []byte, off int)
// and return (off1 int, ok bool). If they return ok==false, they
// also return off1==len(msg), so that the next unpacker will
// also fail. This lets us avoid checks of ok until the end of a
// packing sequence.
package dns
//go:generate go run msg_generate.go
import (
"crypto/rand"
"encoding/binary"
"fmt"
"math/big"
"strconv"
"strings"
)
const (
maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4
// This is the maximum number of compression pointers that should occur in a
// semantically valid message. Each label in a domain name must be at least one
// octet and is separated by a period. The root label won't be represented by a
// compression pointer to a compression pointer, hence the -2 to exclude the
// smallest valid root label.
//
// It is possible to construct a valid message that has more compression pointers
// than this, and still doesn't loop, by pointing to a previous pointer. This is
// not something a well written implementation should ever do, so we leave them
// to trip the maximum compression pointer check.
maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2
// This is the maximum length of a domain name in presentation format. The
// maximum wire length of a domain name is 255 octets (see above), with the
// maximum label length being 63. The wire format requires one extra byte over
// the presentation format, reducing the number of octets by 1. Each label in
// the name will be separated by a single period, with each octet in the label
// expanding to at most 4 bytes (\DDD). If all other labels are of the maximum
// length, then the final label can only be 61 octets long to not exceed the
// maximum allowed wire length.
maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1
)
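// Added note (not in the original source): with the values above,
// maxCompressionPointers = (255+1)/2 - 2 = 126 and
// maxDomainNamePresentationLength = 61*4 + 1 + 3*(63*4 + 1) = 1004.
// The blank assignments below are a hypothetical compile-time check of that
// arithmetic; an out-of-range constant index would fail to compile.
var (
    _ = [1]struct{}{}[maxCompressionPointers-126]
    _ = [1]struct{}{}[maxDomainNamePresentationLength-1004]
)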
// Errors defined in this package.
var (
ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm.
ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication.
ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message.
ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized.
ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode indicates an extended rcode that cannot be packed because the message has no OPT RR.
ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot.
ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID.
ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid.
ErrKey error = &Error{err: "bad key"}
ErrKeySize error = &Error{err: "bad key size"}
ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)}
ErrNoSig error = &Error{err: "no signature found"}
ErrPrivKey error = &Error{err: "bad private key"}
ErrRcode error = &Error{err: "bad rcode"}
ErrRdata error = &Error{err: "bad rdata"}
ErrRRset error = &Error{err: "bad rrset"}
ErrSecret error = &Error{err: "no secrets defined"}
ErrShortRead error = &Error{err: "short read"}
ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated.
ErrSoa error = &Error{err: "no SOA"} // ErrSoa indicates that no SOA RR was seen when doing zone transfers.
ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication.
)
// Id by default returns a 16-bit random number to be used as a message id. The
// number is drawn from a cryptographically secure random number generator.
// Because Id is a variable, it can be reassigned to a custom function.
// For instance, to make it return a static value for testing:
//
// dns.Id = func() uint16 { return 3 }
var Id = id
// id returns a 16-bit random number to be used as a
// message id. The randomness provided should be good enough.
func id() uint16 {
var output uint16
err := binary.Read(rand.Reader, binary.BigEndian, &output)
if err != nil {
panic("dns: reading random id failed: " + err.Error())
}
return output
}
// MsgHdr is a manually-unpacked version of (id, bits).
type MsgHdr struct {
Id uint16
Response bool
Opcode int
Authoritative bool
Truncated bool
RecursionDesired bool
RecursionAvailable bool
Zero bool
AuthenticatedData bool
CheckingDisabled bool
Rcode int
}
// Msg contains the layout of a DNS message.
type Msg struct {
MsgHdr
Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format.
Question []Question // Holds the RR(s) of the question section.
Answer []RR // Holds the RR(s) of the answer section.
Ns []RR // Holds the RR(s) of the authority section.
Extra []RR // Holds the RR(s) of the additional section.
}
// ClassToString maps Classes to strings for each CLASS wire type.
var ClassToString = map[uint16]string{
ClassINET: "IN",
ClassCSNET: "CS",
ClassCHAOS: "CH",
ClassHESIOD: "HS",
ClassNONE: "NONE",
ClassANY: "ANY",
}
// OpcodeToString maps Opcodes to strings.
var OpcodeToString = map[int]string{
OpcodeQuery: "QUERY",
OpcodeIQuery: "IQUERY",
OpcodeStatus: "STATUS",
OpcodeNotify: "NOTIFY",
OpcodeUpdate: "UPDATE",
}
// RcodeToString maps Rcodes to strings.
var RcodeToString = map[int]string{
RcodeSuccess: "NOERROR",
RcodeFormatError: "FORMERR",
RcodeServerFailure: "SERVFAIL",
RcodeNameError: "NXDOMAIN",
RcodeNotImplemented: "NOTIMP",
RcodeRefused: "REFUSED",
RcodeYXDomain: "YXDOMAIN", // See RFC 2136
RcodeYXRrset: "YXRRSET",
RcodeNXRrset: "NXRRSET",
RcodeNotAuth: "NOTAUTH",
RcodeNotZone: "NOTZONE",
RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891
// RcodeBadVers: "BADVERS",
RcodeBadKey: "BADKEY",
RcodeBadTime: "BADTIME",
RcodeBadMode: "BADMODE",
RcodeBadName: "BADNAME",
RcodeBadAlg: "BADALG",
RcodeBadTrunc: "BADTRUNC",
RcodeBadCookie: "BADCOOKIE",
}
// compressionMap is used to allow a more efficient compression map
// to be used for internal packDomainName calls without changing the
// signature or functionality of public API.
//
// In particular, map[string]uint16 uses 25% less per-entry memory
// than does map[string]int.
type compressionMap struct {
ext map[string]int // external callers
int map[string]uint16 // internal callers
}
func (m compressionMap) valid() bool {
return m.int != nil || m.ext != nil
}
func (m compressionMap) insert(s string, pos int) {
if m.ext != nil {
m.ext[s] = pos
} else {
m.int[s] = uint16(pos)
}
}
func (m compressionMap) find(s string) (int, bool) {
if m.ext != nil {
pos, ok := m.ext[s]
return pos, ok
}
pos, ok := m.int[s]
return int(pos), ok
}
// Domain names are a sequence of counted strings
// split at the dots. They end with a zero-length string.
// PackDomainName packs a domain name s into msg[off:].
// If compression is wanted, compress must be true and the compression
// map needs to hold a mapping between domain names and offsets
// pointing into msg.
func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
return packDomainName(s, msg, off, compressionMap{ext: compression}, compress)
}
func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
// XXX: A logical copy of this function exists in IsDomainName and
// should be kept in sync with this function.
ls := len(s)
if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
return off, nil
}
// If not fully qualified, error out.
if !IsFqdn(s) {
return len(msg), ErrFqdn
}
// Each dot ends a segment of the name.
// We trade each dot byte for a length byte.
// Except for escaped dots (\.), which are normal dots.
// There is also a trailing zero.
// Compression
pointer := -1
// Emit sequence of counted strings, chopping at dots.
var (
begin int
compBegin int
compOff int
bs []byte
wasDot bool
)
loop:
for i := 0; i < ls; i++ {
var c byte
if bs == nil {
c = s[i]
} else {
c = bs[i]
}
switch c {
case '\\':
if off+1 > len(msg) {
return len(msg), ErrBuf
}
if bs == nil {
bs = []byte(s)
}
// check for \DDD
if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) {
bs[i] = dddToByte(bs[i+1:])
copy(bs[i+1:ls-3], bs[i+4:])
ls -= 3
compOff += 3
} else {
copy(bs[i:ls-1], bs[i+1:])
ls--
compOff++
}
wasDot = false
case '.':
if i == 0 && len(s) > 1 {
// leading dots are not legal except for the root zone
return len(msg), ErrRdata
}
if wasDot {
// two dots back to back is not legal
return len(msg), ErrRdata
}
wasDot = true
labelLen := i - begin
if labelLen >= 1<<6 { // top two bits of length must be clear
return len(msg), ErrRdata
}
// off can already (we're in a loop) be bigger than len(msg)
// this happens when a name isn't fully qualified
if off+1+labelLen > len(msg) {
return len(msg), ErrBuf
}
// Don't try to compress '.'
// We should only compress when compress is true, but we should also still pick
// up names that can be used for *future* compression(s).
if compression.valid() && !isRootLabel(s, bs, begin, ls) {
if p, ok := compression.find(s[compBegin:]); ok {
// The first hit is the longest matching dname
// keep the pointer offset we get back and store
// the offset of the current name, because that's
// where we need to insert the pointer later
// If compress is true, we're allowed to compress this dname
if compress {
pointer = p // Where to point to
break loop
}
} else if off < maxCompressionOffset {
// Only offsets smaller than maxCompressionOffset can be used.
compression.insert(s[compBegin:], off)
}
}
// The following is covered by the length check above.
msg[off] = byte(labelLen)
if bs == nil {
copy(msg[off+1:], s[begin:i])
} else {
copy(msg[off+1:], bs[begin:i])
}
off += 1 + labelLen
begin = i + 1
compBegin = begin + compOff
default:
wasDot = false
}
}
// Root label is special
if isRootLabel(s, bs, 0, ls) {
return off, nil
}
// If we did compression and we find something add the pointer here
if pointer != -1 {
// We have two bytes (14 bits) to put the pointer in
binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000))
return off + 2, nil
}
if off < len(msg) {
msg[off] = 0
}
return off + 1, nil
}
// isRootLabel returns whether s or bs, from off to end, is the root
// label ".".
//
// If bs is nil, s will be checked, otherwise bs will be checked.
func isRootLabel(s string, bs []byte, off, end int) bool {
if bs == nil {
return s[off:end] == "."
}
return end-off == 1 && bs[off] == '.'
}
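// Illustrative sketch (added; examplePackCompressed is a hypothetical
// helper): packing two names through one shared compression map, so that the
// second name is emitted as the "ftp" label followed by a 2-byte pointer back
// to "example.org." instead of repeating the full suffix.
func examplePackCompressed() (int, error) {
    msg := make([]byte, 64)
    compression := make(map[string]int)
    off, err := PackDomainName("www.example.org.", msg, 0, compression, true)
    if err != nil {
        return 0, err
    }
    // Returns the offset just after the compressed second name.
    return PackDomainName("ftp.example.org.", msg, off, compression, true)
}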
// Unpack a domain name.
// In addition to the simple sequences of counted strings above,
// domain names are allowed to refer to strings elsewhere in the
// packet, to avoid repeating common suffixes when returning
// many entries in a single domain. The pointers are marked
// by a length byte with the top two bits set. Ignoring those
// two bits, that byte and the next give a 14 bit offset from msg[0]
// where we should pick up the trail.
// Note that if we jump elsewhere in the packet,
// we return off1 == the offset after the first pointer we found,
// which is where the next record will start.
// In theory, the pointers are only allowed to jump backward.
// We let them jump anywhere and stop jumping after a while.
// UnpackDomainName unpacks a domain name into a string. It returns
// the name, the new offset into msg and any error that occurred.
//
// When an error is encountered, the unpacked name will be discarded
// and len(msg) will be returned as the offset.
func UnpackDomainName(msg []byte, off int) (string, int, error) {
s := make([]byte, 0, maxDomainNamePresentationLength)
off1 := 0
lenmsg := len(msg)
budget := maxDomainNameWireOctets
ptr := 0 // number of pointers followed
Loop:
for {
if off >= lenmsg {
return "", lenmsg, ErrBuf
}
c := int(msg[off])
off++
switch c & 0xC0 {
case 0x00:
if c == 0x00 {
// end of name
break Loop
}
// literal string
if off+c > lenmsg {
return "", lenmsg, ErrBuf
}
budget -= c + 1 // +1 for the label separator
if budget <= 0 {
return "", lenmsg, ErrLongDomain
}
for _, b := range msg[off : off+c] {
if isDomainNameLabelSpecial(b) {
s = append(s, '\\', b)
} else if b < ' ' || b > '~' {
s = append(s, escapeByte(b)...)
} else {
s = append(s, b)
}
}
s = append(s, '.')
off += c
case 0xC0:
// pointer to somewhere else in msg.
// remember location after first ptr,
// since that's how many bytes we consumed.
// also, don't follow too many pointers --
// maybe there's a loop.
if off >= lenmsg {
return "", lenmsg, ErrBuf
}
c1 := msg[off]
off++
if ptr == 0 {
off1 = off
}
if ptr++; ptr > maxCompressionPointers {
return "", lenmsg, &Error{err: "too many compression pointers"}
}
// Ideally a pointer should only point backwards in the message, which
// would guarantee progress, but the check on the previous three lines
// at least guarantees that following pointers is loop-free.
off = (c^0xC0)<<8 | int(c1)
default:
// 0x80 and 0x40 are reserved
return "", lenmsg, ErrRdata
}
}
if ptr == 0 {
off1 = off
}
if len(s) == 0 {
return ".", off1, nil
}
return string(s), off1, nil
}
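// Illustrative sketch (added; exampleUnpackName is a hypothetical helper):
// unpacking a hand-built, uncompressed wire-format name. This returns
// "www.example.org.", 17, nil.
func exampleUnpackName() (string, int, error) {
    wire := []byte{3, 'w', 'w', 'w', 7, 'e', 'x', 'a', 'm', 'p', 'l', 'e', 3, 'o', 'r', 'g', 0}
    return UnpackDomainName(wire, 0)
}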
func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
if len(txt) == 0 {
if offset >= len(msg) {
return offset, ErrBuf
}
msg[offset] = 0
return offset, nil
}
var err error
for _, s := range txt {
if len(s) > len(tmp) {
return offset, ErrBuf
}
offset, err = packTxtString(s, msg, offset, tmp)
if err != nil {
return offset, err
}
}
return offset, nil
}
func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
lenByteOffset := offset
if offset >= len(msg) || len(s) > len(tmp) {
return offset, ErrBuf
}
offset++
bs := tmp[:len(s)]
copy(bs, s)
for i := 0; i < len(bs); i++ {
if len(msg) <= offset {
return offset, ErrBuf
}
if bs[i] == '\\' {
i++
if i == len(bs) {
break
}
// check for \DDD
if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
msg[offset] = dddToByte(bs[i:])
i += 2
} else {
msg[offset] = bs[i]
}
} else {
msg[offset] = bs[i]
}
offset++
}
l := offset - lenByteOffset - 1
if l > 255 {
return offset, &Error{err: "string exceeded 255 bytes in txt"}
}
msg[lenByteOffset] = byte(l)
return offset, nil
}
func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
if offset >= len(msg) || len(s) > len(tmp) {
return offset, ErrBuf
}
bs := tmp[:len(s)]
copy(bs, s)
for i := 0; i < len(bs); i++ {
if len(msg) <= offset {
return offset, ErrBuf
}
if bs[i] == '\\' {
i++
if i == len(bs) {
break
}
// check for \DDD
if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
msg[offset] = dddToByte(bs[i:])
i += 2
} else {
msg[offset] = bs[i]
}
} else {
msg[offset] = bs[i]
}
offset++
}
return offset, nil
}
func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
off = off0
var s string
for off < len(msg) && err == nil {
s, off, err = unpackString(msg, off)
if err == nil {
ss = append(ss, s)
}
}
return
}
// Helpers for dealing with escaped bytes
func isDigit(b byte) bool { return b >= '0' && b <= '9' }
func dddToByte(s []byte) byte {
_ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}
func dddStringToByte(s string) byte {
_ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}
// Helper function for packing and unpacking
func intToBytes(i *big.Int, length int) []byte {
buf := i.Bytes()
if len(buf) < length {
b := make([]byte, length)
copy(b[length-len(buf):], buf)
return b
}
return buf
}
// PackRR packs a resource record rr into msg[off:].
// See PackDomainName for documentation about the compression.
func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress)
if err == nil {
// packRR no longer sets the Rdlength field on the rr, but
// callers might be expecting it so we set it here.
rr.Header().Rdlength = uint16(off1 - headerEnd)
}
return off1, err
}
func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) {
if rr == nil {
return len(msg), len(msg), &Error{err: "nil rr"}
}
headerEnd, err = rr.Header().packHeader(msg, off, compression, compress)
if err != nil {
return headerEnd, len(msg), err
}
off1, err = rr.pack(msg, headerEnd, compression, compress)
if err != nil {
return headerEnd, len(msg), err
}
rdlength := off1 - headerEnd
if int(uint16(rdlength)) != rdlength { // overflow
return headerEnd, len(msg), ErrRdata
}
// The RDLENGTH field is the last field in the header and we set it here.
binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength))
return headerEnd, off1, nil
}
// UnpackRR unpacks msg[off:] into an RR.
func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
h, off, msg, err := unpackHeader(msg, off)
if err != nil {
return nil, len(msg), err
}
return UnpackRRWithHeader(h, msg, off)
}
// UnpackRRWithHeader unpacks the record type specific payload given an existing
// RR_Header.
func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) {
if newFn, ok := TypeToRR[h.Rrtype]; ok {
rr = newFn()
*rr.Header() = h
} else {
rr = &RFC3597{Hdr: h}
}
if off < 0 || off > len(msg) {
return &h, off, &Error{err: "bad off"}
}
end := off + int(h.Rdlength)
if end < off || end > len(msg) {
return &h, end, &Error{err: "bad rdlength"}
}
if noRdata(h) {
return rr, off, nil
}
off, err = rr.unpack(msg, off)
if err != nil {
return nil, end, err
}
if off != end {
return &h, end, &Error{err: "bad rdlength"}
}
return rr, off, nil
}
// unpackRRslice unpacks msg[off:] into an []RR.
// If we cannot unpack the whole array, then it will return nil
func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) {
var r RR
// Don't pre-allocate, l may be under attacker control
var dst []RR
for i := 0; i < l; i++ {
off1 := off
r, off, err = UnpackRR(msg, off)
if err != nil {
off = len(msg)
break
}
// If offset does not increase anymore, l is a lie
if off1 == off {
break
}
dst = append(dst, r)
}
if err != nil && off == len(msg) {
dst = nil
}
return dst, off, err
}
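// Illustrative sketch (added; exampleRRRoundTrip is a hypothetical helper,
// and NewRR comes from elsewhere in this package): a PackRR/UnpackRR round
// trip for a single, uncompressed resource record.
func exampleRRRoundTrip() (RR, error) {
    rr, err := NewRR("example.org. 3600 IN A 192.0.2.1")
    if err != nil {
        return nil, err
    }
    buf := make([]byte, Len(rr))
    off, err := PackRR(rr, buf, 0, nil, false)
    if err != nil {
        return nil, err
    }
    unpacked, _, err := UnpackRR(buf[:off], 0)
    return unpacked, err
}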
// Convert a MsgHdr to a string, with dig-like headers:
//
//;; opcode: QUERY, status: NOERROR, id: 48404
//
//;; flags: qr aa rd ra;
func (h *MsgHdr) String() string {
if h == nil {
return "<nil> MsgHdr"
}
s := ";; opcode: " + OpcodeToString[h.Opcode]
s += ", status: " + RcodeToString[h.Rcode]
s += ", id: " + strconv.Itoa(int(h.Id)) + "\n"
s += ";; flags:"
if h.Response {
s += " qr"
}
if h.Authoritative {
s += " aa"
}
if h.Truncated {
s += " tc"
}
if h.RecursionDesired {
s += " rd"
}
if h.RecursionAvailable {
s += " ra"
}
if h.Zero { // Hmm
s += " z"
}
if h.AuthenticatedData {
s += " ad"
}
if h.CheckingDisabled {
s += " cd"
}
s += ";"
return s
}
// Pack packs a Msg: it is converted to wire format.
// If Compress is true the message will be in compressed wire format.
func (dns *Msg) Pack() (msg []byte, err error) {
return dns.PackBuffer(nil)
}
// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated.
func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
// If this message can't be compressed, avoid filling the
// compression map and creating garbage.
if dns.Compress && dns.isCompressible() {
compression := make(map[string]uint16) // Compression pointer mappings.
return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true)
}
return dns.packBufferWithCompressionMap(buf, compressionMap{}, false)
}
// packBufferWithCompressionMap packs a Msg, using the given buffer buf.
func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) {
if dns.Rcode < 0 || dns.Rcode > 0xFFF {
return nil, ErrRcode
}
// Set the extended rcode unconditionally if we have an OPT RR; this
// allows the extended rcode bits to be reset if needed.
if opt := dns.IsEdns0(); opt != nil {
opt.SetExtendedRcode(uint16(dns.Rcode))
} else if dns.Rcode > 0xF {
// If Rcode is an extended one and opt is nil, error out.
return nil, ErrExtendedRcode
}
// Convert convenient Msg into wire-like Header.
var dh Header
dh.Id = dns.Id
dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF)
if dns.Response {
dh.Bits |= _QR
}
if dns.Authoritative {
dh.Bits |= _AA
}
if dns.Truncated {
dh.Bits |= _TC
}
if dns.RecursionDesired {
dh.Bits |= _RD
}
if dns.RecursionAvailable {
dh.Bits |= _RA
}
if dns.Zero {
dh.Bits |= _Z
}
if dns.AuthenticatedData {
dh.Bits |= _AD
}
if dns.CheckingDisabled {
dh.Bits |= _CD
}
dh.Qdcount = uint16(len(dns.Question))
dh.Ancount = uint16(len(dns.Answer))
dh.Nscount = uint16(len(dns.Ns))
dh.Arcount = uint16(len(dns.Extra))
// We need the uncompressed length here, because we first pack it and then compress it.
msg = buf
uncompressedLen := msgLenWithCompressionMap(dns, nil)
if packLen := uncompressedLen + 1; len(msg) < packLen {
msg = make([]byte, packLen)
}
// Pack it in: header and then the pieces.
off := 0
off, err = dh.pack(msg, off, compression, compress)
if err != nil {
return nil, err
}
for _, r := range dns.Question {
off, err = r.pack(msg, off, compression, compress)
if err != nil {
return nil, err
}
}
for _, r := range dns.Answer {
_, off, err = packRR(r, msg, off, compression, compress)
if err != nil {
return nil, err
}
}
for _, r := range dns.Ns {
_, off, err = packRR(r, msg, off, compression, compress)
if err != nil {
return nil, err
}
}
for _, r := range dns.Extra {
_, off, err = packRR(r, msg, off, compression, compress)
if err != nil {
return nil, err
}
}
return msg[:off], nil
}
func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
// If we are at the end of the message we should return *just* the
// header. This can still be useful to the caller. 9.9.9.9 sends these
// when responding with REFUSED for instance.
if off == len(msg) {
// reset sections before returning
dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil
return nil
}
// Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are
// attacker controlled. This means we can't use them to pre-allocate
// slices.
dns.Question = nil
for i := 0; i < int(dh.Qdcount); i++ {
off1 := off
var q Question
q, off, err = unpackQuestion(msg, off)
if err != nil {
return err
}
if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie!
dh.Qdcount = uint16(i)
break
}
dns.Question = append(dns.Question, q)
}
dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off)
// The header counts might have been wrong so we need to update it
dh.Ancount = uint16(len(dns.Answer))
if err == nil {
dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off)
}
// The header counts might have been wrong so we need to update it
dh.Nscount = uint16(len(dns.Ns))
if err == nil {
dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
}
// The header counts might have been wrong so we need to update it
dh.Arcount = uint16(len(dns.Extra))
// Set extended Rcode
if opt := dns.IsEdns0(); opt != nil {
dns.Rcode |= opt.ExtendedRcode()
}
if off != len(msg) {
// TODO(miek) make this an error?
// use PackOpt to let people tell how detailed the error reporting should be?
// println("dns: extra bytes in dns packet", off, "<", len(msg))
}
return err
}
// Unpack unpacks a binary message to a Msg structure.
func (dns *Msg) Unpack(msg []byte) (err error) {
dh, off, err := unpackMsgHdr(msg, 0)
if err != nil {
return err
}
dns.setHdr(dh)
return dns.unpack(dh, msg, off)
}
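// Illustrative sketch (added; exampleMsgRoundTrip is a hypothetical helper,
// and SetQuestion/TypeA come from elsewhere in this package): the Pack/Unpack
// round trip for a whole query message.
func exampleMsgRoundTrip() (*Msg, error) {
    m := new(Msg)
    m.SetQuestion("example.org.", TypeA)
    wire, err := m.Pack()
    if err != nil {
        return nil, err
    }
    parsed := new(Msg)
    if err := parsed.Unpack(wire); err != nil {
        return nil, err
    }
    return parsed, nil
}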
// Convert a complete message to a string with dig-like output.
func (dns *Msg) String() string {
if dns == nil {
return "<nil> MsgHdr"
}
s := dns.MsgHdr.String() + " "
s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
opt := dns.IsEdns0()
if opt != nil {
// OPT PSEUDOSECTION
s += opt.String() + "\n"
}
if len(dns.Question) > 0 {
s += "\n;; QUESTION SECTION:\n"
for _, r := range dns.Question {
s += r.String() + "\n"
}
}
if len(dns.Answer) > 0 {
s += "\n;; ANSWER SECTION:\n"
for _, r := range dns.Answer {
if r != nil {
s += r.String() + "\n"
}
}
}
if len(dns.Ns) > 0 {
s += "\n;; AUTHORITY SECTION:\n"
for _, r := range dns.Ns {
if r != nil {
s += r.String() + "\n"
}
}
}
if len(dns.Extra) > 0 && (opt == nil || len(dns.Extra) > 1) {
s += "\n;; ADDITIONAL SECTION:\n"
for _, r := range dns.Extra {
if r != nil && r.Header().Rrtype != TypeOPT {
s += r.String() + "\n"
}
}
}
return s
}
// isCompressible returns whether the msg may be compressible.
func (dns *Msg) isCompressible() bool {
// If we only have one question, there is nothing we can ever compress.
return len(dns.Question) > 1 || len(dns.Answer) > 0 ||
len(dns.Ns) > 0 || len(dns.Extra) > 0
}
// Len returns the message length when in (un)compressed wire format.
// If Compress is true, compression is taken into account. Len() is
// provided as a faster way to get the size of the resulting packet
// than packing it, measuring the size and discarding the buffer.
func (dns *Msg) Len() int {
// If this message can't be compressed, avoid filling the
// compression map and creating garbage.
if dns.Compress && dns.isCompressible() {
compression := make(map[string]struct{})
return msgLenWithCompressionMap(dns, compression)
}
return msgLenWithCompressionMap(dns, nil)
}
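// Illustrative sketch (added; exampleFitsUDP is a hypothetical helper, and
// MinMsgSize is assumed to be this package's classic 512-octet UDP limit):
// Len reports the wire size without packing the message, so it can be
// checked against a size budget before calling Pack.
func exampleFitsUDP(m *Msg) bool {
    return m.Len() <= MinMsgSize
}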
func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int {
l := headerSize
for _, r := range dns.Question {
l += r.len(l, compression)
}
for _, r := range dns.Answer {
if r != nil {
l += r.len(l, compression)
}
}
for _, r := range dns.Ns {
if r != nil {
l += r.len(l, compression)
}
}
for _, r := range dns.Extra {
if r != nil {
l += r.len(l, compression)
}
}
return l
}
func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int {
if s == "" || s == "." {
return 1
}
escaped := strings.Contains(s, "\\")
if compression != nil && (compress || off < maxCompressionOffset) {
// compressionLenSearch will insert the entry into the compression
// map if it doesn't contain it.
if l, ok := compressionLenSearch(compression, s, off); ok && compress {
if escaped {
return escapedNameLen(s[:l]) + 2
}
return l + 2
}
}
if escaped {
return escapedNameLen(s) + 1
}
return len(s) + 1
}
func escapedNameLen(s string) int {
nameLen := len(s)
for i := 0; i < len(s); i++ {
if s[i] != '\\' {
continue
}
if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
nameLen -= 3
i += 3
} else {
nameLen--
i++
}
}
return nameLen
}
func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) {
for off, end := 0, false; !end; off, end = NextLabel(s, off) {
if _, ok := c[s[off:]]; ok {
return off, true
}
if msgOff+off < maxCompressionOffset {
c[s[off:]] = struct{}{}
}
}
return 0, false
}
// Copy returns a new RR which is a deep-copy of r.
func Copy(r RR) RR { return r.copy() }
// Len returns the length (in octets) of the uncompressed RR in wire format.
func Len(r RR) int { return r.len(0, nil) }
// Copy returns a new *Msg which is a deep-copy of dns.
func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) }
// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
func (dns *Msg) CopyTo(r1 *Msg) *Msg {
r1.MsgHdr = dns.MsgHdr
r1.Compress = dns.Compress
if len(dns.Question) > 0 {
r1.Question = make([]Question, len(dns.Question))
copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy
}
rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
r1.Answer, rrArr = rrArr[:0:len(dns.Answer)], rrArr[len(dns.Answer):]
r1.Ns, rrArr = rrArr[:0:len(dns.Ns)], rrArr[len(dns.Ns):]
r1.Extra = rrArr[:0:len(dns.Extra)]
for _, r := range dns.Answer {
r1.Answer = append(r1.Answer, r.copy())
}
for _, r := range dns.Ns {
r1.Ns = append(r1.Ns, r.copy())
}
for _, r := range dns.Extra {
r1.Extra = append(r1.Extra, r.copy())
}
return r1
}
func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
off, err := packDomainName(q.Name, msg, off, compression, compress)
if err != nil {
return off, err
}
off, err = packUint16(q.Qtype, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(q.Qclass, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func unpackQuestion(msg []byte, off int) (Question, int, error) {
var (
q Question
err error
)
q.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
return q, off, err
}
if off == len(msg) {
return q, off, nil
}
q.Qtype, off, err = unpackUint16(msg, off)
if err != nil {
return q, off, err
}
if off == len(msg) {
return q, off, nil
}
q.Qclass, off, err = unpackUint16(msg, off)
if off == len(msg) {
return q, off, nil
}
return q, off, err
}
func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
off, err := packUint16(dh.Id, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Bits, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Qdcount, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Ancount, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Nscount, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(dh.Arcount, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
var (
dh Header
err error
)
dh.Id, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Bits, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Qdcount, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Ancount, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Nscount, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
dh.Arcount, off, err = unpackUint16(msg, off)
if err != nil {
return dh, off, err
}
return dh, off, nil
}
// setHdr sets the header in dns using the binary data in dh.
func (dns *Msg) setHdr(dh Header) {
dns.Id = dh.Id
dns.Response = dh.Bits&_QR != 0
dns.Opcode = int(dh.Bits>>11) & 0xF
dns.Authoritative = dh.Bits&_AA != 0
dns.Truncated = dh.Bits&_TC != 0
dns.RecursionDesired = dh.Bits&_RD != 0
dns.RecursionAvailable = dh.Bits&_RA != 0
dns.Zero = dh.Bits&_Z != 0 // _Z covers the zero bit, which should be zero; not sure why we set it to the opposite.
dns.AuthenticatedData = dh.Bits&_AD != 0
dns.CheckingDisabled = dh.Bits&_CD != 0
dns.Rcode = int(dh.Bits & 0xF)
}
package dns
import (
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"net"
"sort"
"strings"
)
// Helper functions called from the generated zmsg.go.
// These functions are named after the tag used to help pack/unpack; if there is no tag it is the name
// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or
// packDataDomainName.
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv4len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking a"}
}
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
off += net.IPv4len
return a, off, nil
}
func packDataA(a net.IP, msg []byte, off int) (int, error) {
switch len(a) {
case net.IPv4len, net.IPv6len:
// It must be a slice of 4, even if it is 16, we encode only the first 4
if off+net.IPv4len > len(msg) {
return len(msg), &Error{err: "overflow packing a"}
}
copy(msg[off:], a.To4())
off += net.IPv4len
case 0:
// Allowed, for dynamic updates.
default:
return len(msg), &Error{err: "overflow packing a"}
}
return off, nil
}
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv6len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
}
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
off += net.IPv6len
return aaaa, off, nil
}
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
switch len(aaaa) {
case net.IPv6len:
if off+net.IPv6len > len(msg) {
return len(msg), &Error{err: "overflow packing aaaa"}
}
copy(msg[off:], aaaa)
off += net.IPv6len
case 0:
// Allowed, dynamic updates.
default:
return len(msg), &Error{err: "overflow packing aaaa"}
}
return off, nil
}
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
// re-sliced msg according to the expected length of the RR.
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
hdr := RR_Header{}
if off == len(msg) {
return hdr, off, msg, nil
}
hdr.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rrtype, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Class, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Ttl, off, err = unpackUint32(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rdlength, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
return hdr, off, msg, err
}
// packHeader packs an RR header, returning the offset to the end of the header.
// See PackDomainName for documentation about the compression.
func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
if off == len(msg) {
return off, nil
}
off, err := packDomainName(hdr.Name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Rrtype, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Class, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint32(hdr.Ttl, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR.
if err != nil {
return len(msg), err
}
return off, nil
}
// Helper functions.
// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
// Returns an error if msg is smaller than the expected size.
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
lenrd := off + int(rdlength)
if lenrd > len(msg) {
return msg, &Error{err: "overflowing header size"}
}
return msg[:lenrd], nil
}
var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
func fromBase32(s []byte) (buf []byte, err error) {
for i, b := range s {
if b >= 'a' && b <= 'z' {
s[i] = b - 32
}
}
buflen := base32HexNoPadEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base32HexNoPadEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase32(b []byte) string {
return base32HexNoPadEncoding.EncodeToString(b)
}
func fromBase64(s []byte) (buf []byte, err error) {
buflen := base64.StdEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base64.StdEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
// noRdata returns true if the Rdlength is zero, as seen for instance in dynamic updates.
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
if off+1 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
}
return msg[off], off + 1, nil
}
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
if off+1 > len(msg) {
return len(msg), &Error{err: "overflow packing uint8"}
}
msg[off] = i
return off + 1, nil
}
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
if off+2 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
}
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
}
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
if off+2 > len(msg) {
return len(msg), &Error{err: "overflow packing uint16"}
}
binary.BigEndian.PutUint16(msg[off:], i)
return off + 2, nil
}
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
if off+4 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
}
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
}
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
if off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing uint32"}
}
binary.BigEndian.PutUint32(msg[off:], i)
return off + 4, nil
}
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
if off+6 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
}
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
uint64(msg[off+4])<<8 | uint64(msg[off+5])
off += 6
return i, off, nil
}
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
if off+6 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
}
msg[off] = byte(i >> 40)
msg[off+1] = byte(i >> 32)
msg[off+2] = byte(i >> 24)
msg[off+3] = byte(i >> 16)
msg[off+4] = byte(i >> 8)
msg[off+5] = byte(i)
off += 6
return off, nil
}
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
if off+8 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
}
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
}
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
if off+8 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64"}
}
binary.BigEndian.PutUint64(msg[off:], i)
off += 8
return off, nil
}
func unpackString(msg []byte, off int) (string, int, error) {
if off+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
l := int(msg[off])
off++
if off+l > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
var s strings.Builder
consumed := 0
for i, b := range msg[off : off+l] {
switch {
case b == '"' || b == '\\':
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteByte('\\')
s.WriteByte(b)
consumed = i + 1
case b < ' ' || b > '~': // unprintable
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteString(escapeByte(b))
consumed = i + 1
}
}
if consumed == 0 { // no escaping needed
return string(msg[off : off+l]), off + l, nil
}
s.Write(msg[off+consumed : off+l])
return s.String(), off + l, nil
}
func packString(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packTxtString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base32"}
}
s := toBase32(msg[off:end])
return s, end, nil
}
func packStringBase32(s string, msg []byte, off int) (int, error) {
b32, err := fromBase32([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b32) > len(msg) {
return len(msg), &Error{err: "overflow packing base32"}
}
copy(msg[off:off+len(b32)], b32)
off += len(b32)
return off, nil
}
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is base64 encoded value, so we don't need an explicit length
// to be set. Thus far all RR's that have base64 encoded fields have those as their
// last one. What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base64"}
}
s := toBase64(msg[off:end])
return s, end, nil
}
func packStringBase64(s string, msg []byte, off int) (int, error) {
b64, err := fromBase64([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b64) > len(msg) {
return len(msg), &Error{err: "overflow packing base64"}
}
copy(msg[off:off+len(b64)], b64)
off += len(b64)
return off, nil
}
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is hex encoded value, so we don't need an explicit length
// to be set. NSEC and TSIG have hex fields with a length field.
// What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking hex"}
}
s := hex.EncodeToString(msg[off:end])
return s, end, nil
}
func packStringHex(s string, msg []byte, off int) (int, error) {
h, err := hex.DecodeString(s)
if err != nil {
return len(msg), err
}
if off+len(h) > len(msg) {
return len(msg), &Error{err: "overflow packing hex"}
}
copy(msg[off:off+len(h)], h)
off += len(h)
return off, nil
}
func unpackStringAny(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking anything"}
}
return string(msg[off:end]), end, nil
}
func packStringAny(s string, msg []byte, off int) (int, error) {
if off+len(s) > len(msg) {
return len(msg), &Error{err: "overflow packing anything"}
}
copy(msg[off:off+len(s)], s)
off += len(s)
return off, nil
}
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
txt, off, err := unpackTxt(msg, off)
if err != nil {
return nil, len(msg), err
}
return txt, off, nil
}
func packStringTxt(s []string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1) // If the whole string consists of \DDD escapes we need this many.
off, err := packTxt(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
var edns []EDNS0
Option:
var code uint16
if off+4 > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
code = binary.BigEndian.Uint16(msg[off:])
off += 2
optlen := binary.BigEndian.Uint16(msg[off:])
off += 2
if off+int(optlen) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
e := makeDataOpt(code)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
if off < len(msg) {
goto Option
}
return edns, off, nil
}
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
for _, el := range options {
b, err := el.pack()
if err != nil || off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
off += 4
if off+len(b) > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
// Actual data
copy(msg[off:off+len(b)], b)
off += len(b)
}
return off, nil
}
func unpackStringOctet(msg []byte, off int) (string, int, error) {
s := string(msg[off:])
return s, len(msg), nil
}
func packStringOctet(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packOctetString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
var nsec []uint16
length, window, lastwindow := 0, 0, -1
for off < len(msg) {
if off+2 > len(msg) {
return nsec, len(msg), &Error{err: "overflow unpacking NSEC(3)"}
}
window = int(msg[off])
length = int(msg[off+1])
off += 2
if window <= lastwindow {
// RFC 4034: Blocks are present in the NSEC RR RDATA in
// increasing numerical order.
return nsec, len(msg), &Error{err: "out of order NSEC(3) block in type bitmap"}
}
if length == 0 {
// RFC 4034: Blocks with no types present MUST NOT be included.
return nsec, len(msg), &Error{err: "empty NSEC(3) block in type bitmap"}
}
if length > 32 {
return nsec, len(msg), &Error{err: "NSEC(3) block too long in type bitmap"}
}
if off+length > len(msg) {
return nsec, len(msg), &Error{err: "overflowing NSEC(3) block in type bitmap"}
}
// Walk the bytes in the window and extract the type bits
for j, b := range msg[off : off+length] {
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0))
}
if b&0x40 == 0x40 {
nsec = append(nsec, uint16(window*256+j*8+1))
}
if b&0x20 == 0x20 {
nsec = append(nsec, uint16(window*256+j*8+2))
}
if b&0x10 == 0x10 {
nsec = append(nsec, uint16(window*256+j*8+3))
}
if b&0x8 == 0x8 {
nsec = append(nsec, uint16(window*256+j*8+4))
}
if b&0x4 == 0x4 {
nsec = append(nsec, uint16(window*256+j*8+5))
}
if b&0x2 == 0x2 {
nsec = append(nsec, uint16(window*256+j*8+6))
}
if b&0x1 == 0x1 {
nsec = append(nsec, uint16(window*256+j*8+7))
}
}
off += length
lastwindow = window
}
return nsec, off, nil
}
// typeBitMapLen is a helper function which computes the "maximum" length of
// the NSEC Type BitMap field.
func typeBitMapLen(bitmap []uint16) int {
var l int
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
l += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
// packDataNsec would return Error{err: "nsec bits out of order"} here, but
// when computing the length, we want to be liberal.
continue
}
lastwindow, lastlength = window, length
}
l += int(lastlength) + 2
return l
}
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
if len(bitmap) == 0 {
return off, nil
}
if off > len(msg) {
return off, &Error{err: "overflow packing nsec"}
}
toZero := msg[off:]
if maxLen := typeBitMapLen(bitmap); maxLen < len(toZero) {
toZero = toZero[:maxLen]
}
for i := range toZero {
toZero[i] = 0
}
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
off += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
return len(msg), &Error{err: "nsec bits out of order"}
}
if off+2+int(length) > len(msg) {
return len(msg), &Error{err: "overflow packing nsec"}
}
// Setting the window #
msg[off] = byte(window)
// Setting the octets length
msg[off+1] = byte(length)
// Setting the bit value for the type in the right octet
msg[off+1+int(length)] |= byte(1 << (7 - t%8))
lastwindow, lastlength = window, length
}
off += int(lastlength) + 2
return off, nil
}
func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) {
var xs []SVCBKeyValue
var code uint16
var length uint16
var err error
for off < len(msg) {
code, off, err = unpackUint16(msg, off)
if err != nil {
return nil, len(msg), &Error{err: "overflow unpacking SVCB"}
}
length, off, err = unpackUint16(msg, off)
if err != nil || off+int(length) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking SVCB"}
}
e := makeSVCBKeyValue(SVCBKey(code))
if e == nil {
return nil, len(msg), &Error{err: "bad SVCB key"}
}
if err := e.unpack(msg[off : off+int(length)]); err != nil {
return nil, len(msg), err
}
if len(xs) > 0 && e.Key() <= xs[len(xs)-1].Key() {
return nil, len(msg), &Error{err: "SVCB keys not in strictly increasing order"}
}
xs = append(xs, e)
off += int(length)
}
return xs, off, nil
}
func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) {
pairs = append([]SVCBKeyValue(nil), pairs...)
sort.Slice(pairs, func(i, j int) bool {
return pairs[i].Key() < pairs[j].Key()
})
prev := svcb_RESERVED
for _, el := range pairs {
if el.Key() == prev {
return len(msg), &Error{err: "repeated SVCB keys are not allowed"}
}
prev = el.Key()
packed, err := el.pack()
if err != nil {
return len(msg), err
}
off, err = packUint16(uint16(el.Key()), msg, off)
if err != nil {
return len(msg), &Error{err: "overflow packing SVCB"}
}
off, err = packUint16(uint16(len(packed)), msg, off)
if err != nil || off+len(packed) > len(msg) {
return len(msg), &Error{err: "overflow packing SVCB"}
}
copy(msg[off:off+len(packed)], packed)
off += len(packed)
}
return off, nil
}
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
var (
servers []string
s string
err error
)
if end > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
}
for off < end {
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return servers, len(msg), err
}
servers = append(servers, s)
}
return servers, off, nil
}
func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) {
var err error
for _, name := range names {
off, err = packDomainName(name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) {
var err error
for i := range data {
off, err = packDataAplPrefix(&data[i], msg, off)
if err != nil {
return len(msg), err
}
}
return off, nil
}
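// packDataAplPrefix packs a single APL item as laid out in RFC 3123: a two
// octet address family (1 for IPv4, 2 for IPv6), a prefix length octet, an
// octet carrying the negation bit plus the AFDLENGTH, and the address cut down
// to its significant (non trailing-zero) octets. For example 192.0.2.0/24 is
// encoded as family 1, prefix 24, AFDLENGTH 3 and the three octets 192, 0, 2.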
func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) {
if len(p.Network.IP) != len(p.Network.Mask) {
return len(msg), &Error{err: "address and mask lengths don't match"}
}
var err error
prefix, _ := p.Network.Mask.Size()
addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8]
switch len(p.Network.IP) {
case net.IPv4len:
off, err = packUint16(1, msg, off)
case net.IPv6len:
off, err = packUint16(2, msg, off)
default:
err = &Error{err: "unrecognized address family"}
}
if err != nil {
return len(msg), err
}
off, err = packUint8(uint8(prefix), msg, off)
if err != nil {
return len(msg), err
}
var n uint8
if p.Negation {
n = 0x80
}
// trim trailing zero bytes as specified in RFC3123 Sections 4.1 and 4.2.
i := len(addr) - 1
for ; i >= 0 && addr[i] == 0; i-- {
}
addr = addr[:i+1]
adflen := uint8(len(addr)) & 0x7f
off, err = packUint8(n|adflen, msg, off)
if err != nil {
return len(msg), err
}
if off+len(addr) > len(msg) {
return len(msg), &Error{err: "overflow packing APL prefix"}
}
off += copy(msg[off:], addr)
return off, nil
}
func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) {
var result []APLPrefix
for off < len(msg) {
prefix, end, err := unpackDataAplPrefix(msg, off)
if err != nil {
return nil, len(msg), err
}
off = end
result = append(result, prefix)
}
return result, off, nil
}
func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
family, off, err := unpackUint16(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
prefix, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
nlen, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
var ip []byte
switch family {
case 1:
ip = make([]byte, net.IPv4len)
case 2:
ip = make([]byte, net.IPv6len)
default:
return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"}
}
if int(prefix) > 8*len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"}
}
afdlen := int(nlen & 0x7f)
if afdlen > len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL length too long"}
}
if off+afdlen > len(msg) {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"}
}
// Address MUST NOT contain trailing zero bytes per RFC3123 Sections 4.1 and 4.2.
off += copy(ip, msg[off:off+afdlen])
if afdlen > 0 {
last := ip[afdlen-1]
if last == 0 {
return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"}
}
}
ipnet := net.IPNet{
IP: ip,
Mask: net.CIDRMask(int(prefix), 8*len(ip)),
}
return APLPrefix{
Negation: (nlen & 0x80) != 0,
Network: ipnet,
}, off, nil
}
package dns
// Truncate ensures the reply message will fit into the requested buffer
// size by removing records that exceed the requested size.
//
// It will first check if the reply fits without compression and then with
// compression. If it won't fit with compression, Truncate then walks the
// records, adding as many as possible without exceeding the
// requested buffer size.
//
// If the message fits within the requested size without compression,
// Truncate will set the message's Compress attribute to false. It is
// the caller's responsibility to set it back to true if they wish to
// compress the payload regardless of size.
//
// The TC bit will be set if any records were excluded from the message.
// If the TC bit is already set on the message it will be retained.
// TC indicates that the client should retry over TCP.
//
// According to RFC 2181, the TC bit should only be set if not all of the
// "required" RRs can be included in the response. Unfortunately, we have
// no way of knowing which RRs are required so we set the TC bit if any RR
// had to be omitted from the response.
//
// The appropriate buffer size can be retrieved from the request's OPT
// record, if present, and is transport specific otherwise. dns.MinMsgSize
// should be used for UDP requests without an OPT record, and
// dns.MaxMsgSize for TCP requests without an OPT record.
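//
// A minimal usage sketch (req is the assumed incoming request and reply the
// prepared response, both *Msg values):
//
//	size := MinMsgSize
//	if opt := req.IsEdns0(); opt != nil {
//		size = int(opt.UDPSize())
//	}
//	reply.Truncate(size)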
func (dns *Msg) Truncate(size int) {
if dns.IsTsig() != nil {
// To simplify this implementation, we don't perform
// truncation on responses with a TSIG record.
return
}
// RFC 6891 mandates that a payload size in an OPT record smaller
// than 512 (MinMsgSize) bytes must be treated as equal to 512 bytes.
//
// For ease of use, we impose that restriction here.
if size < MinMsgSize {
size = MinMsgSize
}
l := msgLenWithCompressionMap(dns, nil) // uncompressed length
if l <= size {
// Don't waste effort compressing this message.
dns.Compress = false
return
}
dns.Compress = true
edns0 := dns.popEdns0()
if edns0 != nil {
// Account for the OPT record that gets added at the end,
// by subtracting that length from our budget.
//
// The EDNS(0) OPT record must have the root domain and
// its length is thus unaffected by compression.
size -= Len(edns0)
}
compression := make(map[string]struct{})
l = headerSize
for _, r := range dns.Question {
l += r.len(l, compression)
}
var numAnswer int
if l < size {
l, numAnswer = truncateLoop(dns.Answer, size, l, compression)
}
var numNS int
if l < size {
l, numNS = truncateLoop(dns.Ns, size, l, compression)
}
var numExtra int
if l < size {
_, numExtra = truncateLoop(dns.Extra, size, l, compression)
}
// See the function documentation for when we set this.
dns.Truncated = dns.Truncated || len(dns.Answer) > numAnswer ||
len(dns.Ns) > numNS || len(dns.Extra) > numExtra
dns.Answer = dns.Answer[:numAnswer]
dns.Ns = dns.Ns[:numNS]
dns.Extra = dns.Extra[:numExtra]
if edns0 != nil {
// Add the OPT record back onto the additional section.
dns.Extra = append(dns.Extra, edns0)
}
}
func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) {
for i, r := range rrs {
if r == nil {
continue
}
l += r.len(l, compression)
if l > size {
// Return size, rather than l prior to this record,
// to prevent any further records being added.
return size, i
}
if l == size {
return l, i + 1
}
}
return l, len(rrs)
}
package dns
import (
"crypto/sha1"
"encoding/hex"
"strings"
)
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
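//
// A short usage sketch (the algorithm, iteration count and salt below are
// illustrative; only SHA1 is supported and anything else returns ""):
//
//	hashed := HashName("www.example.org.", SHA1, 10, "AABBCCDD")
//	// hashed holds the base32 (extended hex) encoded NSEC3 owner label.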
func HashName(label string, ha uint8, iter uint16, salt string) string {
if ha != SHA1 {
return ""
}
wireSalt := make([]byte, hex.DecodedLen(len(salt)))
n, err := packStringHex(salt, wireSalt, 0)
if err != nil {
return ""
}
wireSalt = wireSalt[:n]
name := make([]byte, 255)
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
if err != nil {
return ""
}
name = name[:off]
s := sha1.New()
// k = 0
s.Write(name)
s.Write(wireSalt)
nsec3 := s.Sum(nil)
// k > 0
for k := uint16(0); k < iter; k++ {
s.Reset()
s.Write(nsec3)
s.Write(wireSalt)
nsec3 = s.Sum(nsec3[:0])
}
return toBase32(nsec3)
}
// Cover returns true if a name is covered by the NSEC3 record.
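//
// A typical denial-of-existence check tests whether an NSEC3 RR from the
// authority section either matches or covers the queried name, e.g. (a
// sketch; nsec3 and qname are assumed):
//
//	if nsec3.Match(qname) || nsec3.Cover(qname) {
//		// this NSEC3 record speaks for qname
//	}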
func (rr *NSEC3) Cover(name string) bool {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
nextHash := rr.NextDomain
// An empty interval (ownerHash == nextHash) covers every hash except the owner itself (e.g. wildcard hashes), so nameHash must not equal ownerHash.
if ownerHash == nextHash && nameHash != ownerHash { // empty interval
return true
}
if ownerHash > nextHash { // end of zone
if nameHash > ownerHash { // covered since there is nothing after ownerHash
return true
}
return nameHash < nextHash // if nameHash is before beginning of zone it is covered
}
if nameHash < ownerHash { // nameHash is before ownerHash, not covered
return false
}
return nameHash < nextHash // if nameHash is before nextHash it is covered (between ownerHash and nextHash)
}
// Match returns true if a name matches the NSEC3 record
func (rr *NSEC3) Match(name string) bool {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
if ownerHash == nameHash {
return true
}
return false
}
package dns
import "strings"
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
// RFC 6895. This allows one to experiment with new RR types, without requesting an
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
type PrivateRdata interface {
// String returns the text presentation of the Rdata of the Private RR.
String() string
// Parse parses the Rdata of the private RR.
Parse([]string) error
// Pack is used when packing a private RR into a buffer.
Pack([]byte) (int, error)
// Unpack is used when unpacking a private RR from a buffer.
Unpack([]byte) (int, error)
// Copy copies the Rdata into the PrivateRdata argument.
Copy(PrivateRdata) error
// Len returns the length in octets of the Rdata.
Len() int
}
// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
// It mocks normal RRs and implements the dns.RR interface.
type PrivateRR struct {
Hdr RR_Header
Data PrivateRdata
generator func() PrivateRdata // for copy
}
// Header returns the RR header of r.
func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
// Private len and copy parts to satisfy RR interface.
func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
l := r.Hdr.len(off, compression)
l += r.Data.Len()
return l
}
func (r *PrivateRR) copy() RR {
// make new RR like this:
rr := &PrivateRR{r.Hdr, r.generator(), r.generator}
if err := r.Data.Copy(rr.Data); err != nil {
panic("dns: got value that could not be used to copy Private rdata: " + err.Error())
}
return rr
}
func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
n, err := r.Data.Pack(msg[off:])
if err != nil {
return len(msg), err
}
off += n
return off, nil
}
func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
off1, err := r.Data.Unpack(msg[off:])
off += off1
return off, err
}
func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError {
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l, _ = c.Next(); l.value {
case zNewline, zEOF:
break Fetch
case zString:
text = append(text, l.token)
}
}
err := r.Data.Parse(text)
if err != nil {
return &ParseError{"", err.Error(), l}
}
return nil
}
func (r *PrivateRR) isDuplicate(r2 RR) bool { return false }
// PrivateHandle registers a private resource record type. It requires
// a string and numeric representation of the private RR type and a generator function as arguments.
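//
// A minimal sketch (the type name MYRR and code 0xFF01 are made up; RFC 6895
// reserves 0xFF00-0xFFFE for private use, and MYRR is assumed to implement
// PrivateRdata):
//
//	PrivateHandle("MYRR", 0xFF01, func() PrivateRdata { return new(MYRR) })
//	defer PrivateHandleRemove(0xFF01)
//	rr, err := NewRR("example.org. 3600 IN MYRR some rdata")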
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
rtypestr = strings.ToUpper(rtypestr)
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} }
TypeToString[rtype] = rtypestr
StringToType[rtypestr] = rtype
}
// PrivateHandleRemove removes definitions required to support private RR type.
func PrivateHandleRemove(rtype uint16) {
rtypestr, ok := TypeToString[rtype]
if ok {
delete(TypeToRR, rtype)
delete(TypeToString, rtype)
delete(StringToType, rtypestr)
}
}
package dns
// StringToType is the reverse of TypeToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
// StringToOpcode is the reverse of OpcodeToString, mapping strings to opcodes.
var StringToOpcode = reverseInt(OpcodeToString)
// StringToRcode is the reverse of RcodeToString, mapping strings to rcodes.
var StringToRcode = reverseInt(RcodeToString)
func init() {
// Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733.
StringToRcode["NOTIMPL"] = RcodeNotImplemented
}
// StringToAlgorithm is the reverse of AlgorithmToString.
var StringToAlgorithm = reverseInt8(AlgorithmToString)
// StringToHash is a map of names to hash IDs.
var StringToHash = reverseInt8(HashToString)
// StringToCertType is the reverse of CertTypeToString.
var StringToCertType = reverseInt16(CertTypeToString)
// Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 {
n := make(map[string]uint8, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt16(m map[uint16]string) map[string]uint16 {
n := make(map[string]uint16, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt(m map[int]string) map[string]int {
n := make(map[string]int, len(m))
for u, s := range m {
n[s] = u
}
return n
}
package dns
// Dedup removes identical RRs from rrs. It preserves the original ordering.
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
// rrs.
// m is used to store the RRs temporarily. If it is nil a new map will be allocated.
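//
// A short sketch:
//
//	a1, _ := NewRR("example.org. 3600 IN A 192.0.2.1")
//	a2, _ := NewRR("example.org. 1800 IN A 192.0.2.1")
//	out := Dedup([]RR{a1, a2}, nil)
//	// out holds a single A record carrying the lower TTL of 1800.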
func Dedup(rrs []RR, m map[string]RR) []RR {
if m == nil {
m = make(map[string]RR)
}
// Save the keys, so we don't have to call normalizedString twice.
keys := make([]*string, 0, len(rrs))
for _, r := range rrs {
key := normalizedString(r)
keys = append(keys, &key)
if mr, ok := m[key]; ok {
// Shortest TTL wins.
rh, mrh := r.Header(), mr.Header()
if mrh.Ttl > rh.Ttl {
mrh.Ttl = rh.Ttl
}
continue
}
m[key] = r
}
// If the length of the result map equals the amount of RRs we got,
// it means they were all different. We can then just return the original rrset.
if len(m) == len(rrs) {
return rrs
}
j := 0
for i, r := range rrs {
// If keys[i] lives in the map, we should copy and remove it.
if _, ok := m[*keys[i]]; ok {
delete(m, *keys[i])
rrs[j] = r
j++
}
if len(m) == 0 {
break
}
}
return rrs[:j]
}
// normalizedString returns a normalized string from r. The TTL
// is removed and the domain name is lowercased. We go from this:
// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
// lowercasename<TAB>CLASS<TAB>TYPE...
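//
// For example WWW.Example.ORG.<TAB>3600<TAB>IN<TAB>A<TAB>10.0.0.1 normalizes
// to www.example.org.<TAB>IN<TAB>A<TAB>10.0.0.1 (only the owner name is
// lowercased; class, type and rdata are left untouched).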
func normalizedString(r RR) string {
// A string Go DNS makes has: domainname<TAB>TTL<TAB>...
b := []byte(r.String())
// find the first non-escaped tab, then another, so we capture where the TTL lives.
esc := false
ttlStart, ttlEnd := 0, 0
for i := 0; i < len(b) && ttlEnd == 0; i++ {
switch {
case b[i] == '\\':
esc = !esc
case b[i] == '\t' && !esc:
if ttlStart == 0 {
ttlStart = i
continue
}
if ttlEnd == 0 {
ttlEnd = i
}
case b[i] >= 'A' && b[i] <= 'Z' && !esc:
b[i] += 32
default:
esc = false
}
}
// remove TTL.
copy(b[ttlStart:], b[ttlEnd:])
cut := ttlEnd - ttlStart
return string(b[:len(b)-cut])
}
package dns
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
)
const maxTok = 2048 // Largest token we can return.
// The maximum depth of $INCLUDE directives supported by the
// ZoneParser API.
const maxIncludeDepth = 7
// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
// * Suppress sequences of spaces;
// * Make each RR fit on one line (_NEWLINE is sent as last);
// * Handle comments: ;
// * Handle braces - anywhere.
const (
// Zonefile
zEOF = iota
zString
zBlank
zQuote
zNewline
zRrtpe
zOwner
zClass
zDirOrigin // $ORIGIN
zDirTTL // $TTL
zDirInclude // $INCLUDE
zDirGenerate // $GENERATE
// Privatekey file
zValue
zKey
zExpectOwnerDir // Ownername
zExpectOwnerBl // Whitespace after the ownername
zExpectAny // Expect rrtype, ttl or class
zExpectAnyNoClass // Expect rrtype or ttl
zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS
zExpectAnyNoTTL // Expect rrtype or class
zExpectAnyNoTTLBl // Whitespace after _EXPECT_ANY_NOTTL
zExpectRrtype // Expect rrtype
zExpectRrtypeBl // Whitespace BEFORE rrtype
zExpectRdata // The first element of the rdata
zExpectDirTTLBl // Space after directive $TTL
zExpectDirTTL // Directive $TTL
zExpectDirOriginBl // Space after directive $ORIGIN
zExpectDirOrigin // Directive $ORIGIN
zExpectDirIncludeBl // Space after directive $INCLUDE
zExpectDirInclude // Directive $INCLUDE
zExpectDirGenerate // Directive $GENERATE
zExpectDirGenerateBl // Space after directive $GENERATE
)
// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
// where the error occurred.
type ParseError struct {
file string
err string
lex lex
}
func (e *ParseError) Error() (s string) {
if e.file != "" {
s = e.file + ": "
}
s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
return
}
type lex struct {
token string // text of the token
err bool // when true, token text has lexer error
value uint8 // value: zString, _BLANK, etc.
torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
line int // line in the file
column int // column in the file
}
// ttlState describes the state necessary to fill in an omitted RR TTL
type ttlState struct {
ttl uint32 // ttl is the current default TTL
isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive
}
// NewRR reads the RR contained in the string s. Only the first RR is returned.
// If s contains no records, NewRR will return nil with no error.
//
// The class defaults to IN and TTL defaults to 3600. The full zone file syntax
// like $TTL, $ORIGIN, etc. is supported. All fields of the returned RR are
// set, except RR.Header().Rdlength which is set to 0.
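//
// A short usage sketch:
//
//	rr, err := NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
//	if err != nil {
//		// handle the parse error
//	}
//	if mx, ok := rr.(*MX); ok {
//		// mx.Preference is 10, mx.Mx is "mx.miek.nl."
//	}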
func NewRR(s string) (RR, error) {
if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
return ReadRR(strings.NewReader(s+"\n"), "")
}
return ReadRR(strings.NewReader(s), "")
}
// ReadRR reads the RR contained in r.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives.
//
// See NewRR for more documentation.
func ReadRR(r io.Reader, file string) (RR, error) {
zp := NewZoneParser(r, ".", file)
zp.SetDefaultTTL(defaultTtl)
zp.SetIncludeAllowed(true)
rr, _ := zp.Next()
return rr, zp.Err()
}
// ZoneParser is a parser for an RFC 1035 style zonefile.
//
// Each parsed RR in the zone is returned sequentially from Next. An
// optional comment can be retrieved with Comment.
//
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
// supported, although $INCLUDE is disabled by default.
// Note that a $GENERATE directive's range supports a maximum of 65535 steps.
//
// Basic usage pattern when reading from a string (z) containing the
// zone data:
//
// zp := NewZoneParser(strings.NewReader(z), "", "")
//
// for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
// // Do something with rr
// }
//
// if err := zp.Err(); err != nil {
// // log.Println(err)
// }
//
// Comments specified after an RR (and on the same line!) are
// returned too:
//
// foo. IN A 10.0.0.1 ; this is a comment
//
// The text "; this is a comment" is returned from Comment. Comments inside
// the RR are returned concatenated along with the RR. Comments on a line
// by themselves are discarded.
//
// Callers should not assume all returned data in a Resource Record is
// syntactically correct, e.g. illegal base64 in RRSIGs will be returned as-is.
type ZoneParser struct {
c *zlexer
parseErr *ParseError
origin string
file string
defttl *ttlState
h RR_Header
// sub is used to parse $INCLUDE files and $GENERATE directives.
// Next, by calling subNext, forwards the resulting RRs from this
// sub parser to the calling code.
sub *ZoneParser
osFile *os.File
includeDepth uint8
includeAllowed bool
generateDisallowed bool
}
// NewZoneParser returns an RFC 1035 style zonefile parser that reads
// from r.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives. The string origin is used as the initial
// origin, as if the file would start with an $ORIGIN directive.
func NewZoneParser(r io.Reader, origin, file string) *ZoneParser {
var pe *ParseError
if origin != "" {
origin = Fqdn(origin)
if _, ok := IsDomainName(origin); !ok {
pe = &ParseError{file, "bad initial origin name", lex{}}
}
}
return &ZoneParser{
c: newZLexer(r),
parseErr: pe,
origin: origin,
file: file,
}
}
// SetDefaultTTL sets the parser's default TTL to ttl.
func (zp *ZoneParser) SetDefaultTTL(ttl uint32) {
zp.defttl = &ttlState{ttl, false}
}
// SetIncludeAllowed controls whether $INCLUDE directives are
// allowed. $INCLUDE directives are not supported by default.
//
// The $INCLUDE directive will open and read from a user controlled
// file on the system. Even if the file is not a valid zonefile, the
// contents of the file may be revealed in error messages, such as:
//
// /etc/passwd: dns: not a TTL: "root:x:0:0:root:/root:/bin/bash" at line: 1:31
// /etc/shadow: dns: not a TTL: "root:$6$<redacted>::0:99999:7:::" at line: 1:125
func (zp *ZoneParser) SetIncludeAllowed(v bool) {
zp.includeAllowed = v
}
// Err returns the first non-EOF error that was encountered by the
// ZoneParser.
func (zp *ZoneParser) Err() error {
if zp.parseErr != nil {
return zp.parseErr
}
if zp.sub != nil {
if err := zp.sub.Err(); err != nil {
return err
}
}
return zp.c.Err()
}
func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) {
zp.parseErr = &ParseError{zp.file, err, l}
return nil, false
}
// Comment returns an optional text comment that occurred alongside
// the RR.
func (zp *ZoneParser) Comment() string {
if zp.parseErr != nil {
return ""
}
if zp.sub != nil {
return zp.sub.Comment()
}
return zp.c.Comment()
}
func (zp *ZoneParser) subNext() (RR, bool) {
if rr, ok := zp.sub.Next(); ok {
return rr, true
}
if zp.sub.osFile != nil {
zp.sub.osFile.Close()
zp.sub.osFile = nil
}
if zp.sub.Err() != nil {
// We have errors to surface.
return nil, false
}
zp.sub = nil
return zp.Next()
}
// Next advances the parser to the next RR in the zonefile and
// returns (RR, true). It will return (nil, false) when parsing
// stops, either by reaching the end of the input or by encountering an
// error. After Next returns (nil, false), the Err method will return
// any error that occurred during parsing.
func (zp *ZoneParser) Next() (RR, bool) {
if zp.parseErr != nil {
return nil, false
}
if zp.sub != nil {
return zp.subNext()
}
// 6 possible beginnings of a line (_ is a space):
//
// 0. zRRTYPE -> all omitted until the rrtype
// 1. zOwner _ zRrtype -> class/ttl omitted
// 2. zOwner _ zString _ zRrtype -> class omitted
// 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
// 4. zOwner _ zClass _ zRrtype -> ttl omitted
// 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
//
// After detecting these, we know the zRrtype so we can jump to functions
// handling the rdata for each of these types.
st := zExpectOwnerDir // initial state
h := &zp.h
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
// zlexer spotted an error already
if l.err {
return zp.setParseError(l.token, l)
}
switch st {
case zExpectOwnerDir:
// We can also expect a directive, like $TTL or $ORIGIN
if zp.defttl != nil {
h.Ttl = zp.defttl.ttl
}
h.Class = ClassINET
switch l.value {
case zNewline:
st = zExpectOwnerDir
case zOwner:
name, ok := toAbsoluteName(l.token, zp.origin)
if !ok {
return zp.setParseError("bad owner name", l)
}
h.Name = name
st = zExpectOwnerBl
case zDirTTL:
st = zExpectDirTTLBl
case zDirOrigin:
st = zExpectDirOriginBl
case zDirInclude:
st = zExpectDirIncludeBl
case zDirGenerate:
st = zExpectDirGenerateBl
case zRrtpe:
h.Rrtype = l.torc
st = zExpectRdata
case zClass:
h.Class = l.torc
st = zExpectAnyNoClassBl
case zBlank:
// Discard, can happen when there is nothing on the
// line except the RR type
case zString:
ttl, ok := stringToTTL(l.token)
if !ok {
return zp.setParseError("not a TTL", l)
}
h.Ttl = ttl
if zp.defttl == nil || !zp.defttl.isByDirective {
zp.defttl = &ttlState{ttl, false}
}
st = zExpectAnyNoTTLBl
default:
return zp.setParseError("syntax error at beginning", l)
}
case zExpectDirIncludeBl:
if l.value != zBlank {
return zp.setParseError("no blank after $INCLUDE-directive", l)
}
st = zExpectDirInclude
case zExpectDirInclude:
if l.value != zString {
return zp.setParseError("expecting $INCLUDE value, not this...", l)
}
neworigin := zp.origin // Optionally a new origin may follow the filename; if not, use the current one.
switch l, _ := zp.c.Next(); l.value {
case zBlank:
l, _ := zp.c.Next()
if l.value == zString {
name, ok := toAbsoluteName(l.token, zp.origin)
if !ok {
return zp.setParseError("bad origin name", l)
}
neworigin = name
}
case zNewline, zEOF:
// Ok
default:
return zp.setParseError("garbage after $INCLUDE", l)
}
if !zp.includeAllowed {
return zp.setParseError("$INCLUDE directive not allowed", l)
}
if zp.includeDepth >= maxIncludeDepth {
return zp.setParseError("too deeply nested $INCLUDE", l)
}
// Start with the new file
includePath := l.token
if !filepath.IsAbs(includePath) {
includePath = filepath.Join(filepath.Dir(zp.file), includePath)
}
r1, e1 := os.Open(includePath)
if e1 != nil {
var as string
if !filepath.IsAbs(l.token) {
as = fmt.Sprintf(" as `%s'", includePath)
}
msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1)
return zp.setParseError(msg, l)
}
zp.sub = NewZoneParser(r1, neworigin, includePath)
zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1
zp.sub.SetIncludeAllowed(true)
return zp.subNext()
case zExpectDirTTLBl:
if l.value != zBlank {
return zp.setParseError("no blank after $TTL-directive", l)
}
st = zExpectDirTTL
case zExpectDirTTL:
if l.value != zString {
return zp.setParseError("expecting $TTL value, not this...", l)
}
if err := slurpRemainder(zp.c); err != nil {
return zp.setParseError(err.err, err.lex)
}
ttl, ok := stringToTTL(l.token)
if !ok {
return zp.setParseError("expecting $TTL value, not this...", l)
}
zp.defttl = &ttlState{ttl, true}
st = zExpectOwnerDir
case zExpectDirOriginBl:
if l.value != zBlank {
return zp.setParseError("no blank after $ORIGIN-directive", l)
}
st = zExpectDirOrigin
case zExpectDirOrigin:
if l.value != zString {
return zp.setParseError("expecting $ORIGIN value, not this...", l)
}
if err := slurpRemainder(zp.c); err != nil {
return zp.setParseError(err.err, err.lex)
}
name, ok := toAbsoluteName(l.token, zp.origin)
if !ok {
return zp.setParseError("bad origin name", l)
}
zp.origin = name
st = zExpectOwnerDir
case zExpectDirGenerateBl:
if l.value != zBlank {
return zp.setParseError("no blank after $GENERATE-directive", l)
}
st = zExpectDirGenerate
case zExpectDirGenerate:
if zp.generateDisallowed {
return zp.setParseError("nested $GENERATE directive not allowed", l)
}
if l.value != zString {
return zp.setParseError("expecting $GENERATE value, not this...", l)
}
return zp.generate(l)
case zExpectOwnerBl:
if l.value != zBlank {
return zp.setParseError("no blank after owner", l)
}
st = zExpectAny
case zExpectAny:
switch l.value {
case zRrtpe:
if zp.defttl == nil {
return zp.setParseError("missing TTL with no previous value", l)
}
h.Rrtype = l.torc
st = zExpectRdata
case zClass:
h.Class = l.torc
st = zExpectAnyNoClassBl
case zString:
ttl, ok := stringToTTL(l.token)
if !ok {
return zp.setParseError("not a TTL", l)
}
h.Ttl = ttl
if zp.defttl == nil || !zp.defttl.isByDirective {
zp.defttl = &ttlState{ttl, false}
}
st = zExpectAnyNoTTLBl
default:
return zp.setParseError("expecting RR type, TTL or class, not this...", l)
}
case zExpectAnyNoClassBl:
if l.value != zBlank {
return zp.setParseError("no blank before class", l)
}
st = zExpectAnyNoClass
case zExpectAnyNoTTLBl:
if l.value != zBlank {
return zp.setParseError("no blank before TTL", l)
}
st = zExpectAnyNoTTL
case zExpectAnyNoTTL:
switch l.value {
case zClass:
h.Class = l.torc
st = zExpectRrtypeBl
case zRrtpe:
h.Rrtype = l.torc
st = zExpectRdata
default:
return zp.setParseError("expecting RR type or class, not this...", l)
}
case zExpectAnyNoClass:
switch l.value {
case zString:
ttl, ok := stringToTTL(l.token)
if !ok {
return zp.setParseError("not a TTL", l)
}
h.Ttl = ttl
if zp.defttl == nil || !zp.defttl.isByDirective {
zp.defttl = &ttlState{ttl, false}
}
st = zExpectRrtypeBl
case zRrtpe:
h.Rrtype = l.torc
st = zExpectRdata
default:
return zp.setParseError("expecting RR type or TTL, not this...", l)
}
case zExpectRrtypeBl:
if l.value != zBlank {
return zp.setParseError("no blank before RR type", l)
}
st = zExpectRrtype
case zExpectRrtype:
if l.value != zRrtpe {
return zp.setParseError("unknown RR type", l)
}
h.Rrtype = l.torc
st = zExpectRdata
case zExpectRdata:
var (
rr RR
parseAsRFC3597 bool
)
if newFn, ok := TypeToRR[h.Rrtype]; ok {
rr = newFn()
*rr.Header() = *h
// We may be parsing a known RR type using the RFC3597 format.
// If so, we handle that here in a generic way.
//
// This is also true for PrivateRR types which will have the
// RFC3597 parsing done for them and the Unpack method called
// to populate the RR instead of simply deferring to Parse.
if zp.c.Peek().token == "\\#" {
parseAsRFC3597 = true
}
} else {
rr = &RFC3597{Hdr: *h}
}
_, isPrivate := rr.(*PrivateRR)
if !isPrivate && zp.c.Peek().token == "" {
// This is a dynamic update rr.
// TODO(tmthrgd): Previously slurpRemainder was only called
// for certain RR types, which may have been important.
if err := slurpRemainder(zp.c); err != nil {
return zp.setParseError(err.err, err.lex)
}
return rr, true
} else if l.value == zNewline {
return zp.setParseError("unexpected newline", l)
}
parseAsRR := rr
if parseAsRFC3597 {
parseAsRR = &RFC3597{Hdr: *h}
}
if err := parseAsRR.parse(zp.c, zp.origin); err != nil {
// err is a concrete *ParseError without the file field set.
// The setParseError call below will construct a new
// *ParseError with file set to zp.file.
// err.lex may be nil in which case we substitute our current
// lex token.
if err.lex == (lex{}) {
return zp.setParseError(err.err, l)
}
return zp.setParseError(err.err, err.lex)
}
if parseAsRFC3597 {
err := parseAsRR.(*RFC3597).fromRFC3597(rr)
if err != nil {
return zp.setParseError(err.Error(), l)
}
}
return rr, true
}
}
// If we get here and h.Rrtype is still zero, we haven't parsed anything. This
// is not an error, because an empty zone file is still a zone file.
return nil, false
}
type zlexer struct {
br io.ByteReader
readErr error
line int
column int
comBuf string
comment string
l lex
cachedL *lex
brace int
quote bool
space bool
commt bool
rrtype bool
owner bool
nextL bool
eol bool // end-of-line
}
func newZLexer(r io.Reader) *zlexer {
br, ok := r.(io.ByteReader)
if !ok {
br = bufio.NewReaderSize(r, 1024)
}
return &zlexer{
br: br,
line: 1,
owner: true,
}
}
func (zl *zlexer) Err() error {
if zl.readErr == io.EOF {
return nil
}
return zl.readErr
}
// readByte returns the next byte from the input
func (zl *zlexer) readByte() (byte, bool) {
if zl.readErr != nil {
return 0, false
}
c, err := zl.br.ReadByte()
if err != nil {
zl.readErr = err
return 0, false
}
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
if zl.eol {
zl.line++
zl.column = 0
zl.eol = false
}
if c == '\n' {
zl.eol = true
} else {
zl.column++
}
return c, true
}
func (zl *zlexer) Peek() lex {
if zl.nextL {
return zl.l
}
l, ok := zl.Next()
if !ok {
return l
}
if zl.nextL {
// Cache l. Next returns zl.cachedL then zl.l.
zl.cachedL = &l
} else {
// In this case l == zl.l, so we just tell Next to return zl.l.
zl.nextL = true
}
return l
}
func (zl *zlexer) Next() (lex, bool) {
l := &zl.l
switch {
case zl.cachedL != nil:
l, zl.cachedL = zl.cachedL, nil
return *l, true
case zl.nextL:
zl.nextL = false
return *l, true
case l.err:
// Parsing errors should be sticky.
return lex{value: zEOF}, false
}
var (
str [maxTok]byte // Hold string text
com [maxTok]byte // Hold comment text
stri int // Offset in str (0 means empty)
comi int // Offset in com (0 means empty)
escape bool
)
if zl.comBuf != "" {
comi = copy(com[:], zl.comBuf)
zl.comBuf = ""
}
zl.comment = ""
for x, ok := zl.readByte(); ok; x, ok = zl.readByte() {
l.line, l.column = zl.line, zl.column
if stri >= len(str) {
l.token = "token length insufficient for parsing"
l.err = true
return *l, true
}
if comi >= len(com) {
l.token = "comment length insufficient for parsing"
l.err = true
return *l, true
}
switch x {
case ' ', '\t':
if escape || zl.quote {
// Inside quotes or escaped this is legal.
str[stri] = x
stri++
escape = false
break
}
if zl.commt {
com[comi] = x
comi++
break
}
var retL lex
if stri == 0 {
// Space directly in the beginning, handled in the grammar
} else if zl.owner {
// If we have a string and it's the first, make it an owner
l.value = zOwner
l.token = string(str[:stri])
// An escaped $... starts with a \ rather than a $, so the directive matching below still works.
switch strings.ToUpper(l.token) {
case "$TTL":
l.value = zDirTTL
case "$ORIGIN":
l.value = zDirOrigin
case "$INCLUDE":
l.value = zDirInclude
case "$GENERATE":
l.value = zDirGenerate
}
retL = *l
} else {
l.value = zString
l.token = string(str[:stri])
if !zl.rrtype {
tokenUpper := strings.ToUpper(l.token)
if t, ok := StringToType[tokenUpper]; ok {
l.value = zRrtpe
l.torc = t
zl.rrtype = true
} else if strings.HasPrefix(tokenUpper, "TYPE") {
t, ok := typeToInt(l.token)
if !ok {
l.token = "unknown RR type"
l.err = true
return *l, true
}
l.value = zRrtpe
l.torc = t
zl.rrtype = true
}
if t, ok := StringToClass[tokenUpper]; ok {
l.value = zClass
l.torc = t
} else if strings.HasPrefix(tokenUpper, "CLASS") {
t, ok := classToInt(l.token)
if !ok {
l.token = "unknown class"
l.err = true
return *l, true
}
l.value = zClass
l.torc = t
}
}
retL = *l
}
zl.owner = false
if !zl.space {
zl.space = true
l.value = zBlank
l.token = " "
if retL == (lex{}) {
return *l, true
}
zl.nextL = true
}
if retL != (lex{}) {
return retL, true
}
case ';':
if escape || zl.quote {
// Inside quotes or escaped this is legal.
str[stri] = x
stri++
escape = false
break
}
zl.commt = true
zl.comBuf = ""
if comi > 1 {
// A newline was previously seen inside a comment that
// was inside braces and we delayed adding it until now.
com[comi] = ' ' // convert newline to space
comi++
if comi >= len(com) {
l.token = "comment length insufficient for parsing"
l.err = true
return *l, true
}
}
com[comi] = ';'
comi++
if stri > 0 {
zl.comBuf = string(com[:comi])
l.value = zString
l.token = string(str[:stri])
return *l, true
}
case '\r':
escape = false
if zl.quote {
str[stri] = x
stri++
}
// discard if outside of quotes
case '\n':
escape = false
// Escaped newline
if zl.quote {
str[stri] = x
stri++
break
}
if zl.commt {
// Reset a comment
zl.commt = false
zl.rrtype = false
// If not in a brace this ends the comment AND the RR
if zl.brace == 0 {
zl.owner = true
l.value = zNewline
l.token = "\n"
zl.comment = string(com[:comi])
return *l, true
}
zl.comBuf = string(com[:comi])
break
}
if zl.brace == 0 {
// If there is previous text, we should output it here
var retL lex
if stri != 0 {
l.value = zString
l.token = string(str[:stri])
if !zl.rrtype {
tokenUpper := strings.ToUpper(l.token)
if t, ok := StringToType[tokenUpper]; ok {
zl.rrtype = true
l.value = zRrtpe
l.torc = t
}
}
retL = *l
}
l.value = zNewline
l.token = "\n"
zl.comment = zl.comBuf
zl.comBuf = ""
zl.rrtype = false
zl.owner = true
if retL != (lex{}) {
zl.nextL = true
return retL, true
}
return *l, true
}
case '\\':
// comments do not get escaped chars, everything is copied
if zl.commt {
com[comi] = x
comi++
break
}
// something already escaped must be in string
if escape {
str[stri] = x
stri++
escape = false
break
}
// something escaped outside of string gets added to string
str[stri] = x
stri++
escape = true
case '"':
if zl.commt {
com[comi] = x
comi++
break
}
if escape {
str[stri] = x
stri++
escape = false
break
}
zl.space = false
// send previously gathered text and the quote
var retL lex
if stri != 0 {
l.value = zString
l.token = string(str[:stri])
retL = *l
}
// send quote itself as separate token
l.value = zQuote
l.token = "\""
zl.quote = !zl.quote
if retL != (lex{}) {
zl.nextL = true
return retL, true
}
return *l, true
case '(', ')':
if zl.commt {
com[comi] = x
comi++
break
}
if escape || zl.quote {
// Inside quotes or escaped this is legal.
str[stri] = x
stri++
escape = false
break
}
switch x {
case ')':
zl.brace--
if zl.brace < 0 {
l.token = "extra closing brace"
l.err = true
return *l, true
}
case '(':
zl.brace++
}
default:
escape = false
if zl.commt {
com[comi] = x
comi++
break
}
str[stri] = x
stri++
zl.space = false
}
}
if zl.readErr != nil && zl.readErr != io.EOF {
// Don't return any tokens after a read error occurs.
return lex{value: zEOF}, false
}
var retL lex
if stri > 0 {
// Send remainder of str
l.value = zString
l.token = string(str[:stri])
retL = *l
if comi <= 0 {
return retL, true
}
}
if comi > 0 {
// Send remainder of com
l.value = zNewline
l.token = "\n"
zl.comment = string(com[:comi])
if retL != (lex{}) {
zl.nextL = true
return retL, true
}
return *l, true
}
if zl.brace != 0 {
l.token = "unbalanced brace"
l.err = true
return *l, true
}
return lex{value: zEOF}, false
}
func (zl *zlexer) Comment() string {
if zl.l.err {
return ""
}
return zl.comment
}
// Extract the class number from CLASSxx
func classToInt(token string) (uint16, bool) {
offset := 5
if len(token) < offset+1 {
return 0, false
}
class, err := strconv.ParseUint(token[offset:], 10, 16)
if err != nil {
return 0, false
}
return uint16(class), true
}
// Extract the rr number from TYPExxx
func typeToInt(token string) (uint16, bool) {
offset := 4
if len(token) < offset+1 {
return 0, false
}
typ, err := strconv.ParseUint(token[offset:], 10, 16)
if err != nil {
return 0, false
}
return uint16(typ), true
}
// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds.
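// For example "1h30M" yields 5400 and "2w" yields 1209600, while a bare
// number such as "3600" is returned unchanged.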
func stringToTTL(token string) (uint32, bool) {
var s, i uint32
for _, c := range token {
switch c {
case 's', 'S':
s += i
i = 0
case 'm', 'M':
s += i * 60
i = 0
case 'h', 'H':
s += i * 60 * 60
i = 0
case 'd', 'D':
s += i * 60 * 60 * 24
i = 0
case 'w', 'W':
s += i * 60 * 60 * 24 * 7
i = 0
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
i *= 10
i += uint32(c) - '0'
default:
return 0, false
}
}
return s + i, true
}
// Parse LOC records' <digits>[.<digits>][mM] into a
// mantissa exponent format. Token should contain the entire
// string (i.e. no spaces allowed)
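// For example "1000m" yields e=5, m=1 (1e5 cm = 1000m) and ".05" yields
// e=0, m=5 (5 cm).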
func stringToCm(token string) (e, m uint8, ok bool) {
if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
token = token[0 : len(token)-1]
}
s := strings.SplitN(token, ".", 2)
var meters, cmeters, val int
var err error
switch len(s) {
case 2:
if cmeters, err = strconv.Atoi(s[1]); err != nil {
return
}
// There's no point in having more than 2 digits in this part; it would only make
// the implementation complicated ('123' would have to be treated as '12').
// So we simply reject it.
// We also make sure the first character is a digit to reject '+-' signs.
if len(s[1]) > 2 || s[1][0] < '0' || s[1][0] > '9' {
return
}
if len(s[1]) == 1 {
// 'nn.1' must be treated as 'nn' meters and 10 cm, not 1 cm.
cmeters *= 10
}
if s[0] == "" {
// This will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm).
break
}
fallthrough
case 1:
if meters, err = strconv.Atoi(s[0]); err != nil {
return
}
// RFC1876 states the max value is 90000000.00. The latter two conditions enforce it.
if s[0][0] < '0' || s[0][0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) {
return
}
case 0:
// huh?
return 0, 0, false
}
ok = true
if meters > 0 {
e = 2
val = meters
} else {
e = 0
val = cmeters
}
for val >= 10 {
e++
val /= 10
}
m = uint8(val)
return
}
func toAbsoluteName(name, origin string) (absolute string, ok bool) {
// check for an explicit origin reference
if name == "@" {
// require a nonempty origin
if origin == "" {
return "", false
}
return origin, true
}
// require a valid domain name
_, ok = IsDomainName(name)
if !ok || name == "" {
return "", false
}
// check if name is already absolute
if IsFqdn(name) {
return name, true
}
// require a nonempty origin
if origin == "" {
return "", false
}
return appendOrigin(name, origin), true
}
func appendOrigin(name, origin string) string {
if origin == "." {
return name + origin
}
return name + "." + origin
}
// LOC record helper function
func locCheckNorth(token string, latitude uint32) (uint32, bool) {
if latitude > 90*1000*60*60 {
return latitude, false
}
switch token {
case "n", "N":
return LOC_EQUATOR + latitude, true
case "s", "S":
return LOC_EQUATOR - latitude, true
}
return latitude, false
}
// LOC record helper function
func locCheckEast(token string, longitude uint32) (uint32, bool) {
if longitude > 180*1000*60*60 {
return longitude, false
}
switch token {
case "e", "E":
return LOC_EQUATOR + longitude, true
case "w", "W":
return LOC_EQUATOR - longitude, true
}
return longitude, false
}
// "Eat" the rest of the "line"
func slurpRemainder(c *zlexer) *ParseError {
l, _ := c.Next()
switch l.value {
case zBlank:
l, _ = c.Next()
if l.value != zNewline && l.value != zEOF {
return &ParseError{"", "garbage after rdata", l}
}
case zNewline:
case zEOF:
default:
return &ParseError{"", "garbage after rdata", l}
}
return nil
}
// Parse a 64-bit value written like a partial IPv6 address: "0014:4fff:ff20:ee64".
// Used for NID and L64 records.
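// For example "0014:4fff:ff20:ee64" parses to the uint64 0x00144fffff20ee64.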
func stringToNodeID(l lex) (uint64, *ParseError) {
if len(l.token) < 19 {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
// There must be three colons at fixed positions; if not, it's a parse error.
if l.token[4] != ':' || l.token[9] != ':' || l.token[14] != ':' {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
u, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
return u, nil
}
package dns
import (
"bytes"
"encoding/base64"
"net"
"strconv"
"strings"
)
// Gather the remainder of the rdata, which may contain embedded spaces, and
// return the parsed string (sans the spaces) or an error.
func endingToString(c *zlexer, errstr string) (string, *ParseError) {
var buffer bytes.Buffer
l, _ := c.Next() // zString
for l.value != zNewline && l.value != zEOF {
if l.err {
return buffer.String(), &ParseError{"", errstr, l}
}
switch l.value {
case zString:
buffer.WriteString(l.token)
case zBlank: // Ok
default:
return "", &ParseError{"", errstr, l}
}
l, _ = c.Next()
}
return buffer.String(), nil
}
// Gather the remainder of the rdata, which may contain embedded spaces, split it
// on unquoted whitespace and return the parsed string slice or an error.
func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) {
// Get the remaining data until we see a zNewline
l, _ := c.Next()
if l.err {
return nil, &ParseError{"", errstr, l}
}
// Build the slice
s := make([]string, 0)
quote := false
empty := false
for l.value != zNewline && l.value != zEOF {
if l.err {
return nil, &ParseError{"", errstr, l}
}
switch l.value {
case zString:
empty = false
if len(l.token) > 255 {
// split up tokens that are larger than 255 into 255-chunks
sx := []string{}
p, i := 0, 255
for {
if i <= len(l.token) {
sx = append(sx, l.token[p:i])
} else {
sx = append(sx, l.token[p:])
break
}
p, i = p+255, i+255
}
s = append(s, sx...)
break
}
s = append(s, l.token)
case zBlank:
if quote {
// zBlank can only be seen in between txt parts.
return nil, &ParseError{"", errstr, l}
}
case zQuote:
if empty && quote {
s = append(s, "")
}
quote = !quote
empty = true
default:
return nil, &ParseError{"", errstr, l}
}
l, _ = c.Next()
}
if quote {
return nil, &ParseError{"", errstr, l}
}
return s, nil
}
func (rr *A) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
rr.A = net.ParseIP(l.token)
// IPv4 addresses cannot include ":".
// We do this rather than use net.IP's To4() because
// To4() treats IPv4-mapped IPv6 addresses as being
// IPv4.
isIPv4 := !strings.Contains(l.token, ":")
if rr.A == nil || !isIPv4 || l.err {
return &ParseError{"", "bad A A", l}
}
return slurpRemainder(c)
}
func (rr *AAAA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
rr.AAAA = net.ParseIP(l.token)
// IPv6 addresses must include ":", and IPv4
// addresses cannot include ":".
isIPv6 := strings.Contains(l.token, ":")
if rr.AAAA == nil || !isIPv6 || l.err {
return &ParseError{"", "bad AAAA AAAA", l}
}
return slurpRemainder(c)
}
func (rr *NS) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad NS Ns", l}
}
rr.Ns = name
return slurpRemainder(c)
}
func (rr *PTR) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad PTR Ptr", l}
}
rr.Ptr = name
return slurpRemainder(c)
}
func (rr *NSAPPTR) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad NSAP-PTR Ptr", l}
}
rr.Ptr = name
return slurpRemainder(c)
}
func (rr *RP) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
mbox, mboxOk := toAbsoluteName(l.token, o)
if l.err || !mboxOk {
return &ParseError{"", "bad RP Mbox", l}
}
rr.Mbox = mbox
c.Next() // zBlank
l, _ = c.Next()
rr.Txt = l.token
txt, txtOk := toAbsoluteName(l.token, o)
if l.err || !txtOk {
return &ParseError{"", "bad RP Txt", l}
}
rr.Txt = txt
return slurpRemainder(c)
}
func (rr *MR) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MR Mr", l}
}
rr.Mr = name
return slurpRemainder(c)
}
func (rr *MB) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MB Mb", l}
}
rr.Mb = name
return slurpRemainder(c)
}
func (rr *MG) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MG Mg", l}
}
rr.Mg = name
return slurpRemainder(c)
}
func (rr *HINFO) parse(c *zlexer, o string) *ParseError {
chunks, e := endingToTxtSlice(c, "bad HINFO Fields")
if e != nil {
return e
}
if ln := len(chunks); ln == 0 {
return nil
} else if ln == 1 {
// Can we split it?
if out := strings.Fields(chunks[0]); len(out) > 1 {
chunks = out
} else {
chunks = append(chunks, "")
}
}
rr.Cpu = chunks[0]
rr.Os = strings.Join(chunks[1:], " ")
return nil
}
func (rr *MINFO) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
rmail, rmailOk := toAbsoluteName(l.token, o)
if l.err || !rmailOk {
return &ParseError{"", "bad MINFO Rmail", l}
}
rr.Rmail = rmail
c.Next() // zBlank
l, _ = c.Next()
rr.Email = l.token
email, emailOk := toAbsoluteName(l.token, o)
if l.err || !emailOk {
return &ParseError{"", "bad MINFO Email", l}
}
rr.Email = email
return slurpRemainder(c)
}
func (rr *MF) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MF Mf", l}
}
rr.Mf = name
return slurpRemainder(c)
}
func (rr *MD) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MD Md", l}
}
rr.Md = name
return slurpRemainder(c)
}
func (rr *MX) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad MX Pref", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Mx = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad MX Mx", l}
}
rr.Mx = name
return slurpRemainder(c)
}
func (rr *RT) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil {
return &ParseError{"", "bad RT Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Host = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad RT Host", l}
}
rr.Host = name
return slurpRemainder(c)
}
func (rr *AFSDB) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad AFSDB Subtype", l}
}
rr.Subtype = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Hostname = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad AFSDB Hostname", l}
}
rr.Hostname = name
return slurpRemainder(c)
}
func (rr *X25) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if l.err {
return &ParseError{"", "bad X25 PSDNAddress", l}
}
rr.PSDNAddress = l.token
return slurpRemainder(c)
}
func (rr *KX) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad KX Pref", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Exchanger = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad KX Exchanger", l}
}
rr.Exchanger = name
return slurpRemainder(c)
}
func (rr *CNAME) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad CNAME Target", l}
}
rr.Target = name
return slurpRemainder(c)
}
func (rr *DNAME) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad DNAME Target", l}
}
rr.Target = name
return slurpRemainder(c)
}
func (rr *SOA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
ns, nsOk := toAbsoluteName(l.token, o)
if l.err || !nsOk {
return &ParseError{"", "bad SOA Ns", l}
}
rr.Ns = ns
c.Next() // zBlank
l, _ = c.Next()
rr.Mbox = l.token
mbox, mboxOk := toAbsoluteName(l.token, o)
if l.err || !mboxOk {
return &ParseError{"", "bad SOA Mbox", l}
}
rr.Mbox = mbox
c.Next() // zBlank
var (
v uint32
ok bool
)
for i := 0; i < 5; i++ {
l, _ = c.Next()
if l.err {
return &ParseError{"", "bad SOA zone parameter", l}
}
if j, err := strconv.ParseUint(l.token, 10, 32); err != nil {
if i == 0 {
// Serial must be a number
return &ParseError{"", "bad SOA zone parameter", l}
}
// We allow other fields to be unitful duration strings
if v, ok = stringToTTL(l.token); !ok {
return &ParseError{"", "bad SOA zone parameter", l}
}
} else {
v = uint32(j)
}
switch i {
case 0:
rr.Serial = v
c.Next() // zBlank
case 1:
rr.Refresh = v
c.Next() // zBlank
case 2:
rr.Retry = v
c.Next() // zBlank
case 3:
rr.Expire = v
c.Next() // zBlank
case 4:
rr.Minttl = v
}
}
return slurpRemainder(c)
}
func (rr *SRV) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad SRV Priority", l}
}
rr.Priority = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e1 := strconv.ParseUint(l.token, 10, 16)
if e1 != nil || l.err {
return &ParseError{"", "bad SRV Weight", l}
}
rr.Weight = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e2 := strconv.ParseUint(l.token, 10, 16)
if e2 != nil || l.err {
return &ParseError{"", "bad SRV Port", l}
}
rr.Port = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Target = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad SRV Target", l}
}
rr.Target = name
return slurpRemainder(c)
}
func (rr *NAPTR) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad NAPTR Order", l}
}
rr.Order = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e1 := strconv.ParseUint(l.token, 10, 16)
if e1 != nil || l.err {
return &ParseError{"", "bad NAPTR Preference", l}
}
rr.Preference = uint16(i)
// Flags
c.Next() // zBlank
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Flags", l}
}
l, _ = c.Next() // Either String or Quote
if l.value == zString {
rr.Flags = l.token
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Flags", l}
}
} else if l.value == zQuote {
rr.Flags = ""
} else {
return &ParseError{"", "bad NAPTR Flags", l}
}
// Service
c.Next() // zBlank
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Service", l}
}
l, _ = c.Next() // Either String or Quote
if l.value == zString {
rr.Service = l.token
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Service", l}
}
} else if l.value == zQuote {
rr.Service = ""
} else {
return &ParseError{"", "bad NAPTR Service", l}
}
// Regexp
c.Next() // zBlank
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Regexp", l}
}
l, _ = c.Next() // Either String or Quote
if l.value == zString {
rr.Regexp = l.token
l, _ = c.Next() // _QUOTE
if l.value != zQuote {
return &ParseError{"", "bad NAPTR Regexp", l}
}
} else if l.value == zQuote {
rr.Regexp = ""
} else {
return &ParseError{"", "bad NAPTR Regexp", l}
}
	// Replacement
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Replacement = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad NAPTR Replacement", l}
}
rr.Replacement = name
return slurpRemainder(c)
}
func (rr *TALINK) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
previousName, previousNameOk := toAbsoluteName(l.token, o)
if l.err || !previousNameOk {
return &ParseError{"", "bad TALINK PreviousName", l}
}
rr.PreviousName = previousName
c.Next() // zBlank
l, _ = c.Next()
rr.NextName = l.token
nextName, nextNameOk := toAbsoluteName(l.token, o)
if l.err || !nextNameOk {
return &ParseError{"", "bad TALINK NextName", l}
}
rr.NextName = nextName
return slurpRemainder(c)
}
func (rr *LOC) parse(c *zlexer, o string) *ParseError {
// Non zero defaults for LOC record, see RFC 1876, Section 3.
rr.Size = 0x12 // 1e2 cm (1m)
rr.HorizPre = 0x16 // 1e6 cm (10000m)
rr.VertPre = 0x13 // 1e3 cm (10m)
ok := false
// North
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err || i > 90 {
return &ParseError{"", "bad LOC Latitude", l}
}
rr.Latitude = 1000 * 60 * 60 * uint32(i)
c.Next() // zBlank
// Either number, 'N' or 'S'
l, _ = c.Next()
if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
goto East
}
if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 {
return &ParseError{"", "bad LOC Latitude minutes", l}
} else {
rr.Latitude += 1000 * 60 * uint32(i)
}
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 {
return &ParseError{"", "bad LOC Latitude seconds", l}
} else {
rr.Latitude += uint32(1000 * i)
}
c.Next() // zBlank
// Either number, 'N' or 'S'
l, _ = c.Next()
if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
goto East
}
// If still alive, flag an error
return &ParseError{"", "bad LOC Latitude North/South", l}
East:
// East
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 180 {
return &ParseError{"", "bad LOC Longitude", l}
} else {
rr.Longitude = 1000 * 60 * 60 * uint32(i)
}
c.Next() // zBlank
// Either number, 'E' or 'W'
l, _ = c.Next()
if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
goto Altitude
}
if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 {
return &ParseError{"", "bad LOC Longitude minutes", l}
} else {
rr.Longitude += 1000 * 60 * uint32(i)
}
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 {
return &ParseError{"", "bad LOC Longitude seconds", l}
} else {
rr.Longitude += uint32(1000 * i)
}
c.Next() // zBlank
// Either number, 'E' or 'W'
l, _ = c.Next()
if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
goto Altitude
}
// If still alive, flag an error
return &ParseError{"", "bad LOC Longitude East/West", l}
Altitude:
c.Next() // zBlank
l, _ = c.Next()
if l.token == "" || l.err {
return &ParseError{"", "bad LOC Altitude", l}
}
if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
l.token = l.token[0 : len(l.token)-1]
}
if i, err := strconv.ParseFloat(l.token, 64); err != nil {
return &ParseError{"", "bad LOC Altitude", l}
} else {
rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5)
}
// And now optionally the other values
l, _ = c.Next()
count := 0
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zString:
switch count {
case 0: // Size
exp, m, ok := stringToCm(l.token)
if !ok {
return &ParseError{"", "bad LOC Size", l}
}
rr.Size = exp&0x0f | m<<4&0xf0
case 1: // HorizPre
exp, m, ok := stringToCm(l.token)
if !ok {
return &ParseError{"", "bad LOC HorizPre", l}
}
rr.HorizPre = exp&0x0f | m<<4&0xf0
case 2: // VertPre
exp, m, ok := stringToCm(l.token)
if !ok {
return &ParseError{"", "bad LOC VertPre", l}
}
rr.VertPre = exp&0x0f | m<<4&0xf0
}
count++
case zBlank:
// Ok
default:
return &ParseError{"", "bad LOC Size, HorizPre or VertPre", l}
}
l, _ = c.Next()
}
return nil
}
func (rr *HIP) parse(c *zlexer, o string) *ParseError {
// HitLength is not represented
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad HIP PublicKeyAlgorithm", l}
}
rr.PublicKeyAlgorithm = uint8(i)
c.Next() // zBlank
l, _ = c.Next() // zString
if l.token == "" || l.err {
return &ParseError{"", "bad HIP Hit", l}
}
rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6.
rr.HitLength = uint8(len(rr.Hit)) / 2
c.Next() // zBlank
l, _ = c.Next() // zString
if l.token == "" || l.err {
return &ParseError{"", "bad HIP PublicKey", l}
}
rr.PublicKey = l.token // This cannot contain spaces
decodedPK, decodedPKerr := base64.StdEncoding.DecodeString(rr.PublicKey)
if decodedPKerr != nil {
return &ParseError{"", "bad HIP PublicKey", l}
}
rr.PublicKeyLength = uint16(len(decodedPK))
// RendezvousServers (if any)
l, _ = c.Next()
var xs []string
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zString:
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad HIP RendezvousServers", l}
}
xs = append(xs, name)
case zBlank:
// Ok
default:
return &ParseError{"", "bad HIP RendezvousServers", l}
}
l, _ = c.Next()
}
rr.RendezvousServers = xs
return nil
}
func (rr *CERT) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if v, ok := StringToCertType[l.token]; ok {
rr.Type = v
} else if i, err := strconv.ParseUint(l.token, 10, 16); err != nil {
return &ParseError{"", "bad CERT Type", l}
} else {
rr.Type = uint16(i)
}
c.Next() // zBlank
l, _ = c.Next() // zString
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad CERT KeyTag", l}
}
rr.KeyTag = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
if v, ok := StringToAlgorithm[l.token]; ok {
rr.Algorithm = v
} else if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
return &ParseError{"", "bad CERT Algorithm", l}
} else {
rr.Algorithm = uint8(i)
}
s, e1 := endingToString(c, "bad CERT Certificate")
if e1 != nil {
return e1
}
rr.Certificate = s
return nil
}
func (rr *OPENPGPKEY) parse(c *zlexer, o string) *ParseError {
s, e := endingToString(c, "bad OPENPGPKEY PublicKey")
if e != nil {
return e
}
rr.PublicKey = s
return nil
}
func (rr *CSYNC) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
j, e := strconv.ParseUint(l.token, 10, 32)
if e != nil {
// Serial must be a number
return &ParseError{"", "bad CSYNC serial", l}
}
rr.Serial = uint32(j)
c.Next() // zBlank
l, _ = c.Next()
j, e1 := strconv.ParseUint(l.token, 10, 16)
if e1 != nil {
		// Flags must be a number
return &ParseError{"", "bad CSYNC flags", l}
}
rr.Flags = uint16(j)
rr.TypeBitMap = make([]uint16, 0)
var (
k uint16
ok bool
)
l, _ = c.Next()
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zBlank:
// Ok
case zString:
tokenUpper := strings.ToUpper(l.token)
if k, ok = StringToType[tokenUpper]; !ok {
if k, ok = typeToInt(l.token); !ok {
return &ParseError{"", "bad CSYNC TypeBitMap", l}
}
}
rr.TypeBitMap = append(rr.TypeBitMap, k)
default:
return &ParseError{"", "bad CSYNC TypeBitMap", l}
}
l, _ = c.Next()
}
return nil
}
func (rr *ZONEMD) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return &ParseError{"", "bad ZONEMD Serial", l}
}
rr.Serial = uint32(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad ZONEMD Scheme", l}
}
rr.Scheme = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, err := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err {
return &ParseError{"", "bad ZONEMD Hash Algorithm", l}
}
rr.Hash = uint8(i)
s, e2 := endingToString(c, "bad ZONEMD Digest")
if e2 != nil {
return e2
}
rr.Digest = s
return nil
}
func (rr *SIG) parse(c *zlexer, o string) *ParseError { return rr.RRSIG.parse(c, o) }
func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
tokenUpper := strings.ToUpper(l.token)
if t, ok := StringToType[tokenUpper]; !ok {
if strings.HasPrefix(tokenUpper, "TYPE") {
t, ok = typeToInt(l.token)
if !ok {
return &ParseError{"", "bad RRSIG Typecovered", l}
}
rr.TypeCovered = t
} else {
return &ParseError{"", "bad RRSIG Typecovered", l}
}
} else {
rr.TypeCovered = t
}
c.Next() // zBlank
l, _ = c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad RRSIG Algorithm", l}
}
rr.Algorithm = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad RRSIG Labels", l}
}
rr.Labels = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 32)
if e2 != nil || l.err {
return &ParseError{"", "bad RRSIG OrigTtl", l}
}
rr.OrigTtl = uint32(i)
c.Next() // zBlank
l, _ = c.Next()
if i, err := StringToTime(l.token); err != nil {
// Try to see if all numeric and use it as epoch
if i, err := strconv.ParseUint(l.token, 10, 32); err == nil {
rr.Expiration = uint32(i)
} else {
return &ParseError{"", "bad RRSIG Expiration", l}
}
} else {
rr.Expiration = i
}
c.Next() // zBlank
l, _ = c.Next()
if i, err := StringToTime(l.token); err != nil {
if i, err := strconv.ParseUint(l.token, 10, 32); err == nil {
rr.Inception = uint32(i)
} else {
return &ParseError{"", "bad RRSIG Inception", l}
}
} else {
rr.Inception = i
}
c.Next() // zBlank
l, _ = c.Next()
i, e3 := strconv.ParseUint(l.token, 10, 16)
if e3 != nil || l.err {
return &ParseError{"", "bad RRSIG KeyTag", l}
}
rr.KeyTag = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
rr.SignerName = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad RRSIG SignerName", l}
}
rr.SignerName = name
s, e4 := endingToString(c, "bad RRSIG Signature")
if e4 != nil {
return e4
}
rr.Signature = s
return nil
}
func (rr *NSEC) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad NSEC NextDomain", l}
}
rr.NextDomain = name
rr.TypeBitMap = make([]uint16, 0)
var (
k uint16
ok bool
)
l, _ = c.Next()
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zBlank:
// Ok
case zString:
tokenUpper := strings.ToUpper(l.token)
if k, ok = StringToType[tokenUpper]; !ok {
if k, ok = typeToInt(l.token); !ok {
return &ParseError{"", "bad NSEC TypeBitMap", l}
}
}
rr.TypeBitMap = append(rr.TypeBitMap, k)
default:
return &ParseError{"", "bad NSEC TypeBitMap", l}
}
l, _ = c.Next()
}
return nil
}
func (rr *NSEC3) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad NSEC3 Hash", l}
}
rr.Hash = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad NSEC3 Flags", l}
}
rr.Flags = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 16)
if e2 != nil || l.err {
return &ParseError{"", "bad NSEC3 Iterations", l}
}
rr.Iterations = uint16(i)
c.Next()
l, _ = c.Next()
if l.token == "" || l.err {
return &ParseError{"", "bad NSEC3 Salt", l}
}
if l.token != "-" {
rr.SaltLength = uint8(len(l.token)) / 2
rr.Salt = l.token
}
c.Next()
l, _ = c.Next()
if l.token == "" || l.err {
return &ParseError{"", "bad NSEC3 NextDomain", l}
}
	rr.HashLength = 20 // SHA-1 hash, 160 bits = 20 octets
rr.NextDomain = l.token
rr.TypeBitMap = make([]uint16, 0)
var (
k uint16
ok bool
)
l, _ = c.Next()
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zBlank:
// Ok
case zString:
tokenUpper := strings.ToUpper(l.token)
if k, ok = StringToType[tokenUpper]; !ok {
if k, ok = typeToInt(l.token); !ok {
return &ParseError{"", "bad NSEC3 TypeBitMap", l}
}
}
rr.TypeBitMap = append(rr.TypeBitMap, k)
default:
return &ParseError{"", "bad NSEC3 TypeBitMap", l}
}
l, _ = c.Next()
}
return nil
}
func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad NSEC3PARAM Hash", l}
}
rr.Hash = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad NSEC3PARAM Flags", l}
}
rr.Flags = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 16)
if e2 != nil || l.err {
return &ParseError{"", "bad NSEC3PARAM Iterations", l}
}
rr.Iterations = uint16(i)
c.Next()
l, _ = c.Next()
if l.token != "-" {
rr.SaltLength = uint8(len(l.token) / 2)
rr.Salt = l.token
}
return slurpRemainder(c)
}
func (rr *EUI48) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if len(l.token) != 17 || l.err {
return &ParseError{"", "bad EUI48 Address", l}
}
addr := make([]byte, 12)
dash := 0
for i := 0; i < 10; i += 2 {
addr[i] = l.token[i+dash]
addr[i+1] = l.token[i+1+dash]
dash++
if l.token[i+1+dash] != '-' {
return &ParseError{"", "bad EUI48 Address", l}
}
}
addr[10] = l.token[15]
addr[11] = l.token[16]
i, e := strconv.ParseUint(string(addr), 16, 48)
if e != nil {
return &ParseError{"", "bad EUI48 Address", l}
}
rr.Address = i
return slurpRemainder(c)
}
func (rr *EUI64) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if len(l.token) != 23 || l.err {
return &ParseError{"", "bad EUI64 Address", l}
}
addr := make([]byte, 16)
dash := 0
for i := 0; i < 14; i += 2 {
addr[i] = l.token[i+dash]
addr[i+1] = l.token[i+1+dash]
dash++
if l.token[i+1+dash] != '-' {
return &ParseError{"", "bad EUI64 Address", l}
}
}
addr[14] = l.token[21]
addr[15] = l.token[22]
i, e := strconv.ParseUint(string(addr), 16, 64)
if e != nil {
return &ParseError{"", "bad EUI68 Address", l}
}
rr.Address = i
return slurpRemainder(c)
}
func (rr *SSHFP) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad SSHFP Algorithm", l}
}
rr.Algorithm = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad SSHFP Type", l}
}
rr.Type = uint8(i)
c.Next() // zBlank
s, e2 := endingToString(c, "bad SSHFP Fingerprint")
if e2 != nil {
return e2
}
rr.FingerPrint = s
return nil
}
func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad " + typ + " Flags", l}
}
rr.Flags = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad " + typ + " Protocol", l}
}
rr.Protocol = uint8(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e2 := strconv.ParseUint(l.token, 10, 8)
if e2 != nil || l.err {
return &ParseError{"", "bad " + typ + " Algorithm", l}
}
rr.Algorithm = uint8(i)
s, e3 := endingToString(c, "bad "+typ+" PublicKey")
if e3 != nil {
return e3
}
rr.PublicKey = s
return nil
}
func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "DNSKEY") }
func (rr *KEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "KEY") }
func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "CDNSKEY") }
func (rr *DS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DS") }
func (rr *DLV) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DLV") }
func (rr *CDS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "CDS") }
func (rr *RKEY) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad RKEY Flags", l}
}
rr.Flags = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad RKEY Protocol", l}
}
rr.Protocol = uint8(i)
c.Next() // zBlank
l, _ = c.Next() // zString
i, e2 := strconv.ParseUint(l.token, 10, 8)
if e2 != nil || l.err {
return &ParseError{"", "bad RKEY Algorithm", l}
}
rr.Algorithm = uint8(i)
s, e3 := endingToString(c, "bad RKEY PublicKey")
if e3 != nil {
return e3
}
rr.PublicKey = s
return nil
}
func (rr *EID) parse(c *zlexer, o string) *ParseError {
s, e := endingToString(c, "bad EID Endpoint")
if e != nil {
return e
}
rr.Endpoint = s
return nil
}
func (rr *NIMLOC) parse(c *zlexer, o string) *ParseError {
s, e := endingToString(c, "bad NIMLOC Locator")
if e != nil {
return e
}
rr.Locator = s
return nil
}
func (rr *GPOS) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
_, e := strconv.ParseFloat(l.token, 64)
if e != nil || l.err {
return &ParseError{"", "bad GPOS Longitude", l}
}
rr.Longitude = l.token
c.Next() // zBlank
l, _ = c.Next()
_, e1 := strconv.ParseFloat(l.token, 64)
if e1 != nil || l.err {
return &ParseError{"", "bad GPOS Latitude", l}
}
rr.Latitude = l.token
c.Next() // zBlank
l, _ = c.Next()
_, e2 := strconv.ParseFloat(l.token, 64)
if e2 != nil || l.err {
return &ParseError{"", "bad GPOS Altitude", l}
}
rr.Altitude = l.token
return slurpRemainder(c)
}
func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad " + typ + " KeyTag", l}
}
rr.KeyTag = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
tokenUpper := strings.ToUpper(l.token)
i, ok := StringToAlgorithm[tokenUpper]
if !ok || l.err {
return &ParseError{"", "bad " + typ + " Algorithm", l}
}
rr.Algorithm = i
} else {
rr.Algorithm = uint8(i)
}
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad " + typ + " DigestType", l}
}
rr.DigestType = uint8(i)
s, e2 := endingToString(c, "bad "+typ+" Digest")
if e2 != nil {
return e2
}
rr.Digest = s
return nil
}
func (rr *TA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad TA KeyTag", l}
}
rr.KeyTag = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
tokenUpper := strings.ToUpper(l.token)
i, ok := StringToAlgorithm[tokenUpper]
if !ok || l.err {
return &ParseError{"", "bad TA Algorithm", l}
}
rr.Algorithm = i
} else {
rr.Algorithm = uint8(i)
}
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad TA DigestType", l}
}
rr.DigestType = uint8(i)
s, e2 := endingToString(c, "bad TA Digest")
if e2 != nil {
return e2
}
rr.Digest = s
return nil
}
func (rr *TLSA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad TLSA Usage", l}
}
rr.Usage = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad TLSA Selector", l}
}
rr.Selector = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 8)
if e2 != nil || l.err {
return &ParseError{"", "bad TLSA MatchingType", l}
}
rr.MatchingType = uint8(i)
	// The remaining rdata is the hex-encoded certificate association data.
s, e3 := endingToString(c, "bad TLSA Certificate")
if e3 != nil {
return e3
}
rr.Certificate = s
return nil
}
func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad SMIMEA Usage", l}
}
rr.Usage = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 8)
if e1 != nil || l.err {
return &ParseError{"", "bad SMIMEA Selector", l}
}
rr.Selector = uint8(i)
c.Next() // zBlank
l, _ = c.Next()
i, e2 := strconv.ParseUint(l.token, 10, 8)
if e2 != nil || l.err {
return &ParseError{"", "bad SMIMEA MatchingType", l}
}
rr.MatchingType = uint8(i)
	// The remaining rdata is the hex-encoded certificate association data.
s, e3 := endingToString(c, "bad SMIMEA Certificate")
if e3 != nil {
return e3
}
rr.Certificate = s
return nil
}
func (rr *RFC3597) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
if l.token != "\\#" {
return &ParseError{"", "bad RFC3597 Rdata", l}
}
c.Next() // zBlank
l, _ = c.Next()
rdlength, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad RFC3597 Rdata ", l}
}
s, e1 := endingToString(c, "bad RFC3597 Rdata")
if e1 != nil {
return e1
}
if int(rdlength)*2 != len(s) {
return &ParseError{"", "bad RFC3597 Rdata", l}
}
rr.Rdata = s
return nil
}
func (rr *SPF) parse(c *zlexer, o string) *ParseError {
s, e := endingToTxtSlice(c, "bad SPF Txt")
if e != nil {
return e
}
rr.Txt = s
return nil
}
func (rr *AVC) parse(c *zlexer, o string) *ParseError {
s, e := endingToTxtSlice(c, "bad AVC Txt")
if e != nil {
return e
}
rr.Txt = s
return nil
}
func (rr *TXT) parse(c *zlexer, o string) *ParseError {
// no zBlank reading here, because all this rdata is TXT
s, e := endingToTxtSlice(c, "bad TXT Txt")
if e != nil {
return e
}
rr.Txt = s
return nil
}
// NINFO parsing is identical to TXT parsing.
func (rr *NINFO) parse(c *zlexer, o string) *ParseError {
s, e := endingToTxtSlice(c, "bad NINFO ZSData")
if e != nil {
return e
}
rr.ZSData = s
return nil
}
func (rr *URI) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad URI Priority", l}
}
rr.Priority = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
i, e1 := strconv.ParseUint(l.token, 10, 16)
if e1 != nil || l.err {
return &ParseError{"", "bad URI Weight", l}
}
rr.Weight = uint16(i)
c.Next() // zBlank
s, e2 := endingToTxtSlice(c, "bad URI Target")
if e2 != nil {
return e2
}
if len(s) != 1 {
return &ParseError{"", "bad URI Target", l}
}
rr.Target = s[0]
return nil
}
func (rr *DHCID) parse(c *zlexer, o string) *ParseError {
// awesome record to parse!
s, e := endingToString(c, "bad DHCID Digest")
if e != nil {
return e
}
rr.Digest = s
return nil
}
func (rr *NID) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad NID Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
u, e1 := stringToNodeID(l)
if e1 != nil || l.err {
return e1
}
rr.NodeID = u
return slurpRemainder(c)
}
func (rr *L32) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad L32 Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Locator32 = net.ParseIP(l.token)
if rr.Locator32 == nil || l.err {
return &ParseError{"", "bad L32 Locator", l}
}
return slurpRemainder(c)
}
func (rr *LP) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad LP Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Fqdn = l.token
name, nameOk := toAbsoluteName(l.token, o)
if l.err || !nameOk {
return &ParseError{"", "bad LP Fqdn", l}
}
rr.Fqdn = name
return slurpRemainder(c)
}
func (rr *L64) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad L64 Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
u, e1 := stringToNodeID(l)
if e1 != nil || l.err {
return e1
}
rr.Locator64 = u
return slurpRemainder(c)
}
func (rr *UID) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return &ParseError{"", "bad UID Uid", l}
}
rr.Uid = uint32(i)
return slurpRemainder(c)
}
func (rr *GID) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return &ParseError{"", "bad GID Gid", l}
}
rr.Gid = uint32(i)
return slurpRemainder(c)
}
func (rr *UINFO) parse(c *zlexer, o string) *ParseError {
s, e := endingToTxtSlice(c, "bad UINFO Uinfo")
if e != nil {
return e
}
	if len(s) == 0 {
return nil
}
rr.Uinfo = s[0] // silently discard anything after the first character-string
return nil
}
func (rr *PX) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return &ParseError{"", "bad PX Preference", l}
}
rr.Preference = uint16(i)
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Map822 = l.token
map822, map822Ok := toAbsoluteName(l.token, o)
if l.err || !map822Ok {
return &ParseError{"", "bad PX Map822", l}
}
rr.Map822 = map822
c.Next() // zBlank
l, _ = c.Next() // zString
rr.Mapx400 = l.token
mapx400, mapx400Ok := toAbsoluteName(l.token, o)
if l.err || !mapx400Ok {
return &ParseError{"", "bad PX Mapx400", l}
}
rr.Mapx400 = mapx400
return slurpRemainder(c)
}
func (rr *CAA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return &ParseError{"", "bad CAA Flag", l}
}
rr.Flag = uint8(i)
c.Next() // zBlank
l, _ = c.Next() // zString
if l.value != zString {
return &ParseError{"", "bad CAA Tag", l}
}
rr.Tag = l.token
c.Next() // zBlank
s, e1 := endingToTxtSlice(c, "bad CAA Value")
if e1 != nil {
return e1
}
if len(s) != 1 {
return &ParseError{"", "bad CAA Value", l}
}
rr.Value = s[0]
return nil
}
func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
// Algorithm
if l.value != zString {
return &ParseError{"", "bad TKEY algorithm", l}
}
rr.Algorithm = l.token
c.Next() // zBlank
// Get the key length and key values
l, _ = c.Next()
	i, e := strconv.ParseUint(l.token, 10, 16) // KeySize is a uint16
if e != nil || l.err {
return &ParseError{"", "bad TKEY key length", l}
}
rr.KeySize = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
if l.value != zString {
return &ParseError{"", "bad TKEY key", l}
}
rr.Key = l.token
c.Next() // zBlank
// Get the otherdata length and string data
l, _ = c.Next()
	i, e1 := strconv.ParseUint(l.token, 10, 16) // OtherLen is a uint16
if e1 != nil || l.err {
return &ParseError{"", "bad TKEY otherdata length", l}
}
rr.OtherLen = uint16(i)
c.Next() // zBlank
l, _ = c.Next()
if l.value != zString {
return &ParseError{"", "bad TKEY otherday", l}
}
rr.OtherData = l.token
return nil
}
func (rr *APL) parse(c *zlexer, o string) *ParseError {
var prefixes []APLPrefix
for {
l, _ := c.Next()
if l.value == zNewline || l.value == zEOF {
break
}
if l.value == zBlank && prefixes != nil {
continue
}
if l.value != zString {
return &ParseError{"", "unexpected APL field", l}
}
// Expected format: [!]afi:address/prefix
colon := strings.IndexByte(l.token, ':')
if colon == -1 {
return &ParseError{"", "missing colon in APL field", l}
}
family, cidr := l.token[:colon], l.token[colon+1:]
var negation bool
if family != "" && family[0] == '!' {
negation = true
family = family[1:]
}
afi, e := strconv.ParseUint(family, 10, 16)
if e != nil {
return &ParseError{"", "failed to parse APL family: " + e.Error(), l}
}
var addrLen int
switch afi {
case 1:
addrLen = net.IPv4len
case 2:
addrLen = net.IPv6len
default:
return &ParseError{"", "unrecognized APL family", l}
}
ip, subnet, e1 := net.ParseCIDR(cidr)
if e1 != nil {
return &ParseError{"", "failed to parse APL address: " + e1.Error(), l}
}
if !ip.Equal(subnet.IP) {
return &ParseError{"", "extra bits in APL address", l}
}
if len(subnet.IP) != addrLen {
return &ParseError{"", "address mismatch with the APL family", l}
}
prefixes = append(prefixes, APLPrefix{
Negation: negation,
Network: *subnet,
})
}
rr.Prefixes = prefixes
return nil
}
package dns
import (
"sync"
)
// ServeMux is a DNS request multiplexer. It matches the zone name of
// each incoming request against a list of registered patterns and calls
// the handler for the pattern that most closely matches the zone name.
//
// ServeMux is DNSSEC aware, meaning that queries for the DS record are
// redirected to the parent zone (if that is also registered), otherwise
// the child gets the query.
//
// ServeMux is also safe for concurrent access from multiple goroutines.
//
// The zero ServeMux is empty and ready for use.
type ServeMux struct {
z map[string]Handler
m sync.RWMutex
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux {
return new(ServeMux)
}
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = NewServeMux()
func (mux *ServeMux) match(q string, t uint16) Handler {
mux.m.RLock()
defer mux.m.RUnlock()
if mux.z == nil {
return nil
}
q = CanonicalName(q)
var handler Handler
for off, end := 0, false; !end; off, end = NextLabel(q, off) {
if h, ok := mux.z[q[off:]]; ok {
if t != TypeDS {
return h
}
// Continue for DS to see if we have a parent too, if so delegate to the parent
handler = h
}
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := mux.z["."]; ok {
return h
}
return handler
}
// Handle adds a handler to the ServeMux for pattern.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
if mux.z == nil {
mux.z = make(map[string]Handler)
}
mux.z[CanonicalName(pattern)] = handler
mux.m.Unlock()
}
// HandleFunc adds a handler function to the ServeMux for pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
mux.Handle(pattern, HandlerFunc(handler))
}
// HandleRemove deregisters the handler specific for pattern from the ServeMux.
func (mux *ServeMux) HandleRemove(pattern string) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
delete(mux.z, CanonicalName(pattern))
mux.m.Unlock()
}
// ServeDNS dispatches the request to the handler whose pattern most
// closely matches the request message.
//
// ServeDNS is DNSSEC aware, meaning that queries for the DS record
// are redirected to the parent zone (if that is also registered),
// otherwise the child gets the query.
//
// If no handler is found, or there is no question, a standard REFUSED
// message is returned.
func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
var h Handler
if len(req.Question) >= 1 { // allow more than one question
h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
}
if h != nil {
h.ServeDNS(w, req)
} else {
handleRefused(w, req)
}
}
// Handle registers the handler with the given pattern
// in the DefaultServeMux. The documentation for
// ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleRemove deregisters the handle with the given pattern
// in the DefaultServeMux.
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
// HandleFunc registers the handler function with the given pattern
// in the DefaultServeMux.
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
DefaultServeMux.HandleFunc(pattern, handler)
}
// DNS server implementation.
package dns
import (
"context"
"crypto/tls"
"encoding/binary"
"errors"
"io"
"net"
"strings"
"sync"
"time"
)
// Default maximum number of TCP queries before we close the socket.
const maxTCPQueries = 128
// aLongTimeAgo is a non-zero time, far in the past, used for
// immediate cancelation of network operations.
var aLongTimeAgo = time.Unix(1, 0)
// Handler is implemented by any value that implements ServeDNS.
type Handler interface {
ServeDNS(w ResponseWriter, r *Msg)
}
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as DNS handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Msg)
// ServeDNS calls f(w, r).
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
f(w, r)
}
// A ResponseWriter interface is used by a DNS handler to
// construct a DNS response.
type ResponseWriter interface {
// LocalAddr returns the net.Addr of the server
LocalAddr() net.Addr
// RemoteAddr returns the net.Addr of the client that sent the current request.
RemoteAddr() net.Addr
// WriteMsg writes a reply back to the client.
WriteMsg(*Msg) error
// Write writes a raw buffer back to the client.
Write([]byte) (int, error)
// Close closes the connection.
Close() error
// TsigStatus returns the status of the Tsig.
TsigStatus() error
// TsigTimersOnly sets the tsig timers only boolean.
TsigTimersOnly(bool)
// Hijack lets the caller take over the connection.
// After a call to Hijack(), the DNS package will not do anything with the connection.
Hijack()
}
// A ConnectionStater interface is used by a DNS Handler to access TLS connection state
// when available.
type ConnectionStater interface {
ConnectionState() *tls.ConnectionState
}
type response struct {
closed bool // connection has been closed
hijacked bool // connection has been hijacked by handler
tsigTimersOnly bool
tsigStatus error
tsigRequestMAC string
tsigProvider TsigProvider
udp net.PacketConn // i/o connection if UDP was used
tcp net.Conn // i/o connection if TCP was used
udpSession *SessionUDP // oob data to get egress interface right
pcSession net.Addr // address to use when writing to a generic net.PacketConn
writer Writer // writer to output the raw DNS bits
}
// handleRefused returns a HandlerFunc that returns REFUSED for every request it gets.
func handleRefused(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetRcode(r, RcodeRefused)
w.WriteMsg(m)
}
// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
// Deprecated: This function is going away.
func HandleFailed(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetRcode(r, RcodeServerFailure)
// does not matter if this write fails
w.WriteMsg(m)
}
// ListenAndServe starts a server on the address and network specified,
// invoking handler for incoming queries.
func ListenAndServe(addr string, network string, handler Handler) error {
server := &Server{Addr: addr, Net: network, Handler: handler}
return server.ListenAndServe()
}
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
// http://golang.org/pkg/net/http/#ListenAndServeTLS
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
config := tls.Config{
Certificates: []tls.Certificate{cert},
}
server := &Server{
Addr: addr,
Net: "tcp-tls",
TLSConfig: &config,
Handler: handler,
}
return server.ListenAndServe()
}
// ActivateAndServe activates a server with a listener from systemd;
// l and p should not both be non-nil.
// If both l and p are non-nil, only p will be used.
// handler is invoked for incoming queries.
func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
server := &Server{Listener: l, PacketConn: p, Handler: handler}
return server.ActivateAndServe()
}
// Writer writes raw DNS messages; each call to Write should send an entire message.
type Writer interface {
io.Writer
}
// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
type Reader interface {
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
}
// PacketConnReader is an optional interface that Readers can implement to support using generic net.PacketConns.
type PacketConnReader interface {
Reader
// ReadPacketConn reads a raw message from a generic net.PacketConn UDP connection. Implementations may
// alter connection properties, for example the read-deadline.
ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error)
}
// defaultReader is an adapter for the Server struct that implements the Reader and
// PacketConnReader interfaces using the readTCP, readUDP and readPacketConn funcs
// of the embedded Server.
type defaultReader struct {
*Server
}
var _ PacketConnReader = defaultReader{}
func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
return dr.readTCP(conn, timeout)
}
func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
return dr.readUDP(conn, timeout)
}
func (dr defaultReader) ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) {
return dr.readPacketConn(conn, timeout)
}
// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
// Implementations should never return a nil Reader.
// Readers should also implement the optional PacketConnReader interface.
// PacketConnReader is required to use a generic net.PacketConn.
type DecorateReader func(Reader) Reader
// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
// Implementations should never return a nil Writer.
type DecorateWriter func(Writer) Writer
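
// exampleCountingWriter is an illustrative sketch, not part of the upstream
// library: a Writer decorator that wraps the server's Writer and counts the
// bytes written. The type name and the counter field are assumptions; only
// the DecorateWriter hook it plugs into is part of the real API.
type exampleCountingWriter struct {
	w Writer
	n int
}

// Write forwards to the wrapped Writer and accumulates the byte count.
func (c *exampleCountingWriter) Write(p []byte) (int, error) {
	n, err := c.w.Write(p)
	c.n += n
	return n, err
}

// Usage sketch: srv := &Server{DecorateWriter: func(w Writer) Writer { return &exampleCountingWriter{w: w} }}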
// A Server defines parameters for running a DNS server.
type Server struct {
// Address to listen on, ":dns" if empty.
Addr string
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
Net string
// TCP Listener to use, this is to aid in systemd's socket activation.
Listener net.Listener
// TLS connection configuration
TLSConfig *tls.Config
// UDP "Listener" to use, this is to aid in systemd's socket activation.
PacketConn net.PacketConn
// Handler to invoke, dns.DefaultServeMux if nil.
Handler Handler
// Default buffer size to use to read incoming UDP messages. If not set
// it defaults to MinMsgSize (512 B).
UDPSize int
	// The read timeout set on new connections (via net.Conn.SetReadDeadline), defaults to 2 * time.Second.
ReadTimeout time.Duration
	// The write timeout set on new connections (via net.Conn.SetWriteDeadline), defaults to 2 * time.Second.
WriteTimeout time.Duration
// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
IdleTimeout func() time.Duration
// An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
TsigProvider TsigProvider
// Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
TsigSecret map[string]string
// If NotifyStartedFunc is set it is called once the server has started listening.
NotifyStartedFunc func()
// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
DecorateReader DecorateReader
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
DecorateWriter DecorateWriter
// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
MaxTCPQueries int
// Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
// It is only supported on go1.11+ and when using ListenAndServe.
ReusePort bool
// AcceptMsgFunc will check the incoming message and will reject it early in the process.
// By default DefaultMsgAcceptFunc will be used.
MsgAcceptFunc MsgAcceptFunc
// Shutdown handling
lock sync.RWMutex
started bool
shutdown chan struct{}
conns map[net.Conn]struct{}
// A pool for UDP message buffers.
udpPool sync.Pool
}
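
// exampleServerLifecycle is an illustrative sketch, not part of the upstream
// library: it configures a Server, waits for it to start via
// NotifyStartedFunc, then shuts it down cleanly. The listen address is an
// assumption for the sketch.
func exampleServerLifecycle() error {
	started := make(chan struct{})
	srv := &Server{
		Addr:              "127.0.0.1:5353",
		Net:               "udp",
		NotifyStartedFunc: func() { close(started) },
	}
	errCh := make(chan error, 1)
	go func() { errCh <- srv.ListenAndServe() }()
	<-started // the listener is up; queries are now being served
	if err := srv.Shutdown(); err != nil {
		return err
	}
	return <-errCh // nil after a clean shutdown
}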
func (srv *Server) tsigProvider() TsigProvider {
if srv.TsigProvider != nil {
return srv.TsigProvider
}
if srv.TsigSecret != nil {
return tsigSecretProvider(srv.TsigSecret)
}
return nil
}
func (srv *Server) isStarted() bool {
srv.lock.RLock()
started := srv.started
srv.lock.RUnlock()
return started
}
func makeUDPBuffer(size int) func() interface{} {
return func() interface{} {
return make([]byte, size)
}
}
func (srv *Server) init() {
srv.shutdown = make(chan struct{})
srv.conns = make(map[net.Conn]struct{})
if srv.UDPSize == 0 {
srv.UDPSize = MinMsgSize
}
if srv.MsgAcceptFunc == nil {
srv.MsgAcceptFunc = DefaultMsgAcceptFunc
}
if srv.Handler == nil {
srv.Handler = DefaultServeMux
}
srv.udpPool.New = makeUDPBuffer(srv.UDPSize)
}
func unlockOnce(l sync.Locker) func() {
var once sync.Once
return func() { once.Do(l.Unlock) }
}
// ListenAndServe starts a nameserver on the configured address in *Server.
func (srv *Server) ListenAndServe() error {
unlock := unlockOnce(&srv.lock)
srv.lock.Lock()
defer unlock()
if srv.started {
return &Error{err: "server already started"}
}
addr := srv.Addr
if addr == "" {
addr = ":domain"
}
srv.init()
switch srv.Net {
case "tcp", "tcp4", "tcp6":
l, err := listenTCP(srv.Net, addr, srv.ReusePort)
if err != nil {
return err
}
srv.Listener = l
srv.started = true
unlock()
return srv.serveTCP(l)
case "tcp-tls", "tcp4-tls", "tcp6-tls":
if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
return errors.New("dns: neither Certificates nor GetCertificate set in Config")
}
network := strings.TrimSuffix(srv.Net, "-tls")
l, err := listenTCP(network, addr, srv.ReusePort)
if err != nil {
return err
}
l = tls.NewListener(l, srv.TLSConfig)
srv.Listener = l
srv.started = true
unlock()
return srv.serveTCP(l)
case "udp", "udp4", "udp6":
l, err := listenUDP(srv.Net, addr, srv.ReusePort)
if err != nil {
return err
}
u := l.(*net.UDPConn)
if e := setUDPSocketOptions(u); e != nil {
u.Close()
return e
}
srv.PacketConn = l
srv.started = true
unlock()
return srv.serveUDP(u)
}
return &Error{err: "bad network"}
}
// ActivateAndServe starts a nameserver with the PacketConn or Listener
// configured in *Server. Its main use is to start a server from systemd.
func (srv *Server) ActivateAndServe() error {
unlock := unlockOnce(&srv.lock)
srv.lock.Lock()
defer unlock()
if srv.started {
return &Error{err: "server already started"}
}
srv.init()
if srv.PacketConn != nil {
		// Check that the PacketConn is a *net.UDPConn and that its
		// value is not nil.
if t, ok := srv.PacketConn.(*net.UDPConn); ok && t != nil {
if e := setUDPSocketOptions(t); e != nil {
return e
}
}
srv.started = true
unlock()
return srv.serveUDP(srv.PacketConn)
}
if srv.Listener != nil {
srv.started = true
unlock()
return srv.serveTCP(srv.Listener)
}
return &Error{err: "bad listeners"}
}
// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
// ActivateAndServe will return.
func (srv *Server) Shutdown() error {
return srv.ShutdownContext(context.Background())
}
// ShutdownContext shuts down a server. After a call to ShutdownContext,
// ListenAndServe and ActivateAndServe will return.
//
// A context.Context may be passed to limit how long to wait for connections
// to terminate.
func (srv *Server) ShutdownContext(ctx context.Context) error {
srv.lock.Lock()
if !srv.started {
srv.lock.Unlock()
return &Error{err: "server not started"}
}
srv.started = false
if srv.PacketConn != nil {
srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
if srv.Listener != nil {
srv.Listener.Close()
}
for rw := range srv.conns {
rw.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
srv.lock.Unlock()
if testShutdownNotify != nil {
testShutdownNotify.Broadcast()
}
var ctxErr error
select {
case <-srv.shutdown:
case <-ctx.Done():
ctxErr = ctx.Err()
}
if srv.PacketConn != nil {
srv.PacketConn.Close()
}
return ctxErr
}
var testShutdownNotify *sync.Cond
// getReadTimeout is a helper func to use the system timeout if the server did not intend to change it.
func (srv *Server) getReadTimeout() time.Duration {
if srv.ReadTimeout != 0 {
return srv.ReadTimeout
}
return dnsTimeout
}
// serveTCP starts a TCP listener for the server.
func (srv *Server) serveTCP(l net.Listener) error {
defer l.Close()
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
var wg sync.WaitGroup
defer func() {
wg.Wait()
close(srv.shutdown)
}()
for srv.isStarted() {
rw, err := l.Accept()
if err != nil {
if !srv.isStarted() {
return nil
}
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
continue
}
return err
}
srv.lock.Lock()
// Track the connection to allow unblocking reads on shutdown.
srv.conns[rw] = struct{}{}
srv.lock.Unlock()
wg.Add(1)
go srv.serveTCPConn(&wg, rw)
}
return nil
}
// serveUDP starts a UDP listener for the server.
func (srv *Server) serveUDP(l net.PacketConn) error {
defer l.Close()
reader := Reader(defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
lUDP, isUDP := l.(*net.UDPConn)
readerPC, canPacketConn := reader.(PacketConnReader)
if !isUDP && !canPacketConn {
return &Error{err: "PacketConnReader was not implemented on Reader returned from DecorateReader but is required for net.PacketConn"}
}
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
var wg sync.WaitGroup
defer func() {
wg.Wait()
close(srv.shutdown)
}()
rtimeout := srv.getReadTimeout()
// deadline is not used here
for srv.isStarted() {
var (
m []byte
sPC net.Addr
sUDP *SessionUDP
err error
)
if isUDP {
m, sUDP, err = reader.ReadUDP(lUDP, rtimeout)
} else {
m, sPC, err = readerPC.ReadPacketConn(l, rtimeout)
}
if err != nil {
if !srv.isStarted() {
return nil
}
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
continue
}
return err
}
if len(m) < headerSize {
if cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
continue
}
wg.Add(1)
go srv.serveUDPPacket(&wg, m, l, sUDP, sPC)
}
return nil
}
// Serve a new TCP connection.
func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {
w := &response{tsigProvider: srv.tsigProvider(), tcp: rw}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
reader := Reader(defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
idleTimeout := tcpIdleTimeout
if srv.IdleTimeout != nil {
idleTimeout = srv.IdleTimeout()
}
timeout := srv.getReadTimeout()
limit := srv.MaxTCPQueries
if limit == 0 {
limit = maxTCPQueries
}
for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ {
m, err := reader.ReadTCP(w.tcp, timeout)
if err != nil {
// TODO(tmthrgd): handle error
break
}
srv.serveDNS(m, w)
if w.closed {
break // Close() was called
}
if w.hijacked {
break // client will call Close() themselves
}
// The first read uses the read timeout, the rest use the
// idle timeout.
timeout = idleTimeout
}
if !w.hijacked {
w.Close()
}
srv.lock.Lock()
delete(srv.conns, w.tcp)
srv.lock.Unlock()
wg.Done()
}
// Serve a new UDP request.
func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u net.PacketConn, udpSession *SessionUDP, pcSession net.Addr) {
w := &response{tsigProvider: srv.tsigProvider(), udp: u, udpSession: udpSession, pcSession: pcSession}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
srv.serveDNS(m, w)
wg.Done()
}
func (srv *Server) serveDNS(m []byte, w *response) {
dh, off, err := unpackMsgHdr(m, 0)
if err != nil {
// Let client hang, they are sending crap; any reply can be used to amplify.
return
}
req := new(Msg)
req.setHdr(dh)
switch action := srv.MsgAcceptFunc(dh); action {
case MsgAccept:
if req.unpack(dh, m, off) == nil {
break
}
fallthrough
case MsgReject, MsgRejectNotImplemented:
opcode := req.Opcode
req.SetRcodeFormatError(req)
req.Zero = false
if action == MsgRejectNotImplemented {
req.Opcode = opcode
req.Rcode = RcodeNotImplemented
}
// Are we allowed to delete any OPT records here?
req.Ns, req.Answer, req.Extra = nil, nil, nil
w.WriteMsg(req)
fallthrough
case MsgIgnore:
if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
return
}
w.tsigStatus = nil
if w.tsigProvider != nil {
if t := req.IsTsig(); t != nil {
w.tsigStatus = TsigVerifyWithProvider(m, w.tsigProvider, "", false)
w.tsigTimersOnly = false
w.tsigRequestMAC = t.MAC
}
}
if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
srv.Handler.ServeDNS(w, req) // Writes back to the client
}
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
// If we race with ShutdownContext, the read deadline may
// have been set in the distant past to unblock the read
// below. We must not override it, otherwise we may block
// ShutdownContext.
srv.lock.RLock()
if srv.started {
conn.SetReadDeadline(time.Now().Add(timeout))
}
srv.lock.RUnlock()
var length uint16
if err := binary.Read(conn, binary.BigEndian, &length); err != nil {
return nil, err
}
m := make([]byte, length)
if _, err := io.ReadFull(conn, m); err != nil {
return nil, err
}
return m, nil
}
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
srv.lock.RLock()
if srv.started {
// See the comment in readTCP above.
conn.SetReadDeadline(time.Now().Add(timeout))
}
srv.lock.RUnlock()
m := srv.udpPool.Get().([]byte)
n, s, err := ReadFromSessionUDP(conn, m)
if err != nil {
srv.udpPool.Put(m)
return nil, nil, err
}
m = m[:n]
return m, s, nil
}
func (srv *Server) readPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) {
srv.lock.RLock()
if srv.started {
// See the comment in readTCP above.
conn.SetReadDeadline(time.Now().Add(timeout))
}
srv.lock.RUnlock()
m := srv.udpPool.Get().([]byte)
n, addr, err := conn.ReadFrom(m)
if err != nil {
srv.udpPool.Put(m)
return nil, nil, err
}
m = m[:n]
return m, addr, nil
}
// WriteMsg implements the ResponseWriter.WriteMsg method.
func (w *response) WriteMsg(m *Msg) (err error) {
if w.closed {
return &Error{err: "WriteMsg called after Close"}
}
var data []byte
	if w.tsigProvider != nil { // if there is no provider, don't check for the tsig (which is a longer check)
if t := m.IsTsig(); t != nil {
data, w.tsigRequestMAC, err = TsigGenerateWithProvider(m, w.tsigProvider, w.tsigRequestMAC, w.tsigTimersOnly)
if err != nil {
return err
}
_, err = w.writer.Write(data)
return err
}
}
data, err = m.Pack()
if err != nil {
return err
}
_, err = w.writer.Write(data)
return err
}
// Write implements the ResponseWriter.Write method.
func (w *response) Write(m []byte) (int, error) {
if w.closed {
return 0, &Error{err: "Write called after Close"}
}
switch {
case w.udp != nil:
if u, ok := w.udp.(*net.UDPConn); ok {
return WriteToSessionUDP(u, m, w.udpSession)
}
return w.udp.WriteTo(m, w.pcSession)
case w.tcp != nil:
if len(m) > MaxMsgSize {
return 0, &Error{err: "message too large"}
}
msg := make([]byte, 2+len(m))
binary.BigEndian.PutUint16(msg, uint16(len(m)))
copy(msg[2:], m)
return w.tcp.Write(msg)
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// LocalAddr implements the ResponseWriter.LocalAddr method.
func (w *response) LocalAddr() net.Addr {
switch {
case w.udp != nil:
return w.udp.LocalAddr()
case w.tcp != nil:
return w.tcp.LocalAddr()
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// RemoteAddr implements the ResponseWriter.RemoteAddr method.
func (w *response) RemoteAddr() net.Addr {
switch {
case w.udpSession != nil:
return w.udpSession.RemoteAddr()
case w.pcSession != nil:
return w.pcSession
case w.tcp != nil:
return w.tcp.RemoteAddr()
default:
panic("dns: internal error: udpSession, pcSession and tcp are all nil")
}
}
// TsigStatus implements the ResponseWriter.TsigStatus method.
func (w *response) TsigStatus() error { return w.tsigStatus }
// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
// Hijack implements the ResponseWriter.Hijack method.
func (w *response) Hijack() { w.hijacked = true }
// Close implements the ResponseWriter.Close method.
func (w *response) Close() error {
if w.closed {
return &Error{err: "connection already closed"}
}
w.closed = true
switch {
case w.udp != nil:
// Can't close the udp conn, as that is actually the listener.
return nil
case w.tcp != nil:
return w.tcp.Close()
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// ConnectionState implements the ConnectionStater.ConnectionState method.
func (w *response) ConnectionState() *tls.ConnectionState {
type tlsConnectionStater interface {
ConnectionState() tls.ConnectionState
}
if v, ok := w.tcp.(tlsConnectionStater); ok {
t := v.ConnectionState()
return &t
}
return nil
}
package dns
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"encoding/binary"
"math/big"
"strings"
"time"
)
// Sign signs a dns.Msg. It fills the signature with the appropriate data.
// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
// and Expiration set.
func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
if k == nil {
return nil, ErrPrivKey
}
if rr.KeyTag == 0 || rr.SignerName == "" || rr.Algorithm == 0 {
return nil, ErrKey
}
rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0}
rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0
buf := make([]byte, m.Len()+Len(rr))
mbuf, err := m.PackBuffer(buf)
if err != nil {
return nil, err
}
if &buf[0] != &mbuf[0] {
return nil, ErrBuf
}
off, err := PackRR(rr, buf, len(mbuf), nil, false)
if err != nil {
return nil, err
}
buf = buf[:off:cap(buf)]
h, cryptohash, err := hashFromAlgorithm(rr.Algorithm)
if err != nil {
return nil, err
}
// Write SIG rdata
h.Write(buf[len(mbuf)+1+2+2+4+2:])
// Write message
h.Write(buf[:len(mbuf)])
signature, err := sign(k, h.Sum(nil), cryptohash, rr.Algorithm)
if err != nil {
return nil, err
}
rr.Signature = toBase64(signature)
buf = append(buf, signature...)
if len(buf) > int(^uint16(0)) {
return nil, ErrBuf
}
// Adjust sig data length
rdoff := len(mbuf) + 1 + 2 + 2 + 4
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
rdlen += uint16(len(signature))
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
// Adjust additional count
adc := binary.BigEndian.Uint16(buf[10:])
adc++
binary.BigEndian.PutUint16(buf[10:], adc)
return buf, nil
}
// Verify validates the message buf using the key k.
// It's assumed that buf is a valid message from which rr was unpacked.
func (rr *SIG) Verify(k *KEY, buf []byte) error {
if k == nil {
return ErrKey
}
if rr.KeyTag == 0 || rr.SignerName == "" || rr.Algorithm == 0 {
return ErrKey
}
h, cryptohash, err := hashFromAlgorithm(rr.Algorithm)
if err != nil {
return err
}
buflen := len(buf)
qdc := binary.BigEndian.Uint16(buf[4:])
anc := binary.BigEndian.Uint16(buf[6:])
auc := binary.BigEndian.Uint16(buf[8:])
adc := binary.BigEndian.Uint16(buf[10:])
offset := headerSize
for i := uint16(0); i < qdc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type and Class
offset += 2 + 2
}
for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type, Class and TTL
offset += 2 + 2 + 4
if offset+1 >= buflen {
continue
}
rdlen := binary.BigEndian.Uint16(buf[offset:])
offset += 2
offset += int(rdlen)
}
if offset >= buflen {
return &Error{err: "overflowing unpacking signed message"}
}
// offset should be just prior to SIG
bodyend := offset
// owner name SHOULD be root
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip Type, Class, TTL, RDLen
offset += 2 + 2 + 4 + 2
sigstart := offset
// Skip Type Covered, Algorithm, Labels, Original TTL
offset += 2 + 1 + 1 + 4
if offset+4+4 >= buflen {
return &Error{err: "overflow unpacking signed message"}
}
expire := binary.BigEndian.Uint32(buf[offset:])
offset += 4
incept := binary.BigEndian.Uint32(buf[offset:])
offset += 4
now := uint32(time.Now().Unix())
if now < incept || now > expire {
return ErrTime
}
// Skip key tag
offset += 2
var signername string
signername, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
	// If the key has come from the DNS, name compression might
	// have mangled the case of the name.
if !strings.EqualFold(signername, k.Header().Name) {
return &Error{err: "signer name doesn't match key name"}
}
sigend := offset
h.Write(buf[sigstart:sigend])
h.Write(buf[:10])
	// Write the ARCOUNT decremented by one (the SIG RR itself is not part
	// of the hashed message), encoded big-endian.
	h.Write([]byte{
		byte((adc - 1) >> 8),
		byte(adc - 1),
	})
h.Write(buf[12:bodyend])
hashed := h.Sum(nil)
sig := buf[sigend:]
switch k.Algorithm {
case RSASHA1, RSASHA256, RSASHA512:
pk := k.publicKeyRSA()
if pk != nil {
return rsa.VerifyPKCS1v15(pk, cryptohash, hashed, sig)
}
case ECDSAP256SHA256, ECDSAP384SHA384:
pk := k.publicKeyECDSA()
r := new(big.Int).SetBytes(sig[:len(sig)/2])
s := new(big.Int).SetBytes(sig[len(sig)/2:])
if pk != nil {
if ecdsa.Verify(pk, hashed, r, s) {
return nil
}
return ErrSig
}
case ED25519:
pk := k.publicKeyED25519()
if pk != nil {
if ed25519.Verify(pk, hashed, sig) {
return nil
}
return ErrSig
}
}
return ErrKeyAlg
}
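
// exampleSIG0Sign is an illustrative sketch, not part of the upstream library:
// it attaches a SIG(0) signature to an outgoing message using a crypto.Signer
// whose public part is published as a KEY record. The key tag, signer name
// and validity window are assumptions for the sketch.
func exampleSIG0Sign(priv crypto.Signer, m *Msg) ([]byte, error) {
	sig := new(SIG)
	sig.Algorithm = ECDSAP256SHA256
	sig.KeyTag = 12345              // must match the published KEY record (assumed)
	sig.SignerName = "example.org." // owner name of the KEY record (assumed)
	now := time.Now()
	sig.Inception = uint32(now.Add(-5 * time.Minute).Unix())
	sig.Expiration = uint32(now.Add(5 * time.Minute).Unix())
	// Returns the wire-format message with the SIG record appended.
	return sig.Sign(priv, m)
}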
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted for dns package usage by Miek Gieben.
package dns
import "sync"
import "time"
// call is an in-flight or completed singleflight.Do call
type call struct {
wg sync.WaitGroup
val *Msg
rtt time.Duration
err error
dups int
}
// singleflight represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type singleflight struct {
sync.Mutex // protects m
m map[string]*call // lazily initialized
dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
}
// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
g.Lock()
if g.m == nil {
g.m = make(map[string]*call)
}
if c, ok := g.m[key]; ok {
c.dups++
g.Unlock()
c.wg.Wait()
return c.val, c.rtt, c.err, true
}
c := new(call)
c.wg.Add(1)
g.m[key] = c
g.Unlock()
c.val, c.rtt, c.err = fn()
c.wg.Done()
if !g.dontDeleteForTesting {
g.Lock()
delete(g.m, key)
g.Unlock()
}
return c.val, c.rtt, c.err, c.dups > 0
}
package dns
import (
"crypto/sha256"
"crypto/x509"
"encoding/hex"
)
// Sign creates a SMIMEA record from an SSL certificate.
func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
r.Hdr.Rrtype = TypeSMIMEA
r.Usage = uint8(usage)
r.Selector = uint8(selector)
r.MatchingType = uint8(matchingType)
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
return err
}
// Verify verifies a SMIMEA record against an SSL certificate. If it is OK
// a nil error is returned.
func (r *SMIMEA) Verify(cert *x509.Certificate) error {
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil {
return err // Not also ErrSig?
}
if r.Certificate == c {
return nil
}
return ErrSig // ErrSig, really?
}
// SMIMEAName returns the owner name of a SMIMEA resource record as per the
// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3.
func SMIMEAName(email, domain string) (string, error) {
hasher := sha256.New()
hasher.Write([]byte(email))
// RFC Section 3: "The local-part is hashed using the SHA2-256
// algorithm with the hash truncated to 28 octets and
// represented in its hexadecimal representation to become the
// left-most label in the prepared domain name"
return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
}