diff --git a/saml/saml.go b/saml/saml.go
new file mode 100644
index 0000000000000000000000000000000000000000..40713e8f9d21da7fe056fb8623fa96480a0deb1d
--- /dev/null
+++ b/saml/saml.go
@@ -0,0 +1,203 @@
+package saml
+
+import (
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/crewjam/saml"
+	"github.com/crewjam/saml/logger"
+	"github.com/gorilla/mux"
+	yaml "gopkg.in/yaml.v2"
+
+	"git.autistici.org/id/go-sso/httpsso"
+)
+
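+// ServiceProviderConfig holds per-service-provider settings. It is
+// currently an empty placeholder: toEntity returns nil, so no service
+// provider metadata is produced yet.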
+type ServiceProviderConfig struct {
+}
+
+func (p *ServiceProviderConfig) toEntity() *saml.EntityDescriptor {
+	return nil
+}
+
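+// Config describes the SAML IdP setup. Given the yaml tags below, it can
+// be loaded from a YAML file along these lines (all values illustrative):
+//
+//   base_url: https://idp.example.com/saml
+//   users_file: /etc/saml/users.yml
+//   certificate_file: /etc/saml/cert.pem
+//   private_key_file: /etc/saml/key.pem
+//   session_auth_key: <random 32- or 64-byte string>
+//   session_enc_key: <random 16-, 24- or 32-byte string>
+//   sso_server_url: https://login.example.com
+//   sso_public_key_file: /etc/sso/public.key
+//   sso_domain: example.com
+//   service_providers: {}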
+type Config struct {
+	BaseURL string `yaml:"base_url"`
+
+	UsersFile string `yaml:"users_file"`
+
+	// SAML X509 credentials.
+	CertificateFile string `yaml:"certificate_file"`
+	PrivateKeyFile  string `yaml:"private_key_file"`
+
+	// SSO configuration.
+	SessionAuthKey    string `yaml:"session_auth_key"`
+	SessionEncKey     string `yaml:"session_enc_key"`
+	SSOLoginServerURL string `yaml:"sso_server_url"`
+	SSOPublicKeyFile  string `yaml:"sso_public_key_file"`
+	SSODomain         string `yaml:"sso_domain"`
+
+	// Service provider config.
+	ServiceProviders map[string]*ServiceProviderConfig `yaml:"service_providers"`
+}
+
+// Sanity checks for the configuration.
+func (c *Config) check() error {
+	switch len(c.SessionAuthKey) {
+	case 32, 64:
+	case 0:
+		return errors.New("session_auth_key is empty")
+	default:
+		return errors.New("session_auth_key must be a random string of 32 or 64 bytes")
+	}
+	switch len(c.SessionEncKey) {
+	case 16, 24, 32:
+	case 0:
+		return errors.New("session_enc_key is empty")
+	default:
+		return errors.New("session_enc_key must be a random string of 16, 24 or 32 bytes")
+	}
+	if c.SSOLoginServerURL == "" {
+		return errors.New("sso_server_url is empty")
+	}
+	if c.SSODomain == "" {
+		return errors.New("sso_domain is empty")
+	}
+	return nil
+}
+
+func (c *Config) GetServiceProvider(r *http.Request, serviceProviderID string) (*saml.EntityDescriptor, error) {
+	srv, ok := c.ServiceProviders[serviceProviderID]
+	if !ok {
+		return nil, os.ErrNotExist
+	}
+	return srv.toEntity(), nil
+}
+
+// Read users from a YAML-encoded file, in a format surprisingly
+// compatible with git.autistici.org/id/auth/server.
+//
+// TODO: Make it retrieve the email addresses as extra data in the SSO
+// token (this feature is currently unsupported by the SSO server,
+// even though the auth-server provides the information).
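+//
+// An illustrative users file (names and addresses are made up):
+//
+//   - name: alice
+//     email: alice@example.com
+//   - name: bob
+//     email: bob@example.com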
+type userInfo struct {
+	Name  string `yaml:"name"`
+	Email string `yaml:"email"`
+}
+
+type userFileBackend struct {
+	users map[string]userInfo
+}
+
+func newUserFileBackend(path string) (*userFileBackend, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	var userList []userInfo
+	if err := yaml.Unmarshal(data, &userList); err != nil {
+		return nil, err
+	}
+	users := make(map[string]userInfo)
+	for _, u := range userList {
+		users[u.Name] = u
+	}
+	return &userFileBackend{users}, nil
+}
+
+func (b *userFileBackend) GetSession(w http.ResponseWriter, r *http.Request, req *saml.IdpAuthnRequest) *saml.Session {
+	// The request should have the X-Authenticated-User header.
+	username := r.Header.Get("X-Authenticated-User")
+	if username == "" {
+		http.Error(w, "No user found", http.StatusInternalServerError)
+		return nil
+	}
+	user, ok := b.users[username]
+	if !ok {
+		http.Error(w, "User not found", http.StatusInternalServerError)
+		return nil
+	}
+
+	return &saml.Session{
+		ID:             base64.StdEncoding.EncodeToString(randomBytes(32)),
+		CreateTime:     saml.TimeNow(),
+		ExpireTime:     saml.TimeNow().Add(sessionMaxAge),
+		Index:          hex.EncodeToString(randomBytes(32)),
+		UserName:       user.Name,
+		UserEmail:      user.Email,
+		UserCommonName: user.Name,
+		UserGivenName:  user.Name,
+	}
+}
+
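+// NewSAMLIDP builds the HTTP handler implementing the SAML identity
+// provider, with the /sso endpoint wrapped by SSO authentication and the
+// /metadata endpoint served unauthenticated. A minimal usage sketch
+// (config loading, error handling and the listen address are illustrative):
+//
+//   h, err := NewSAMLIDP(cfg)
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   log.Fatal(http.ListenAndServe(":8000", h))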
+func NewSAMLIDP(config *Config) (http.Handler, error) {
+	if err := config.check(); err != nil {
+		return nil, err
+	}
+
+	cert, err := tls.LoadX509KeyPair(config.CertificateFile, config.PrivateKeyFile)
+	if err != nil {
+		return nil, err
+	}
+	// tls.LoadX509KeyPair does not retain the parsed certificate in Leaf,
+	// so parse it explicitly for the identity provider below.
+	cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
+	if err != nil {
+		return nil, err
+	}
+
+	pkey, err := ioutil.ReadFile(config.SSOPublicKeyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	w, err := httpsso.NewSSOWrapper(config.SSOLoginServerURL, pkey, config.SSODomain, []byte(config.SessionAuthKey), []byte(config.SessionEncKey))
+	if err != nil {
+		return nil, err
+	}
+
+	baseURL, err := url.Parse(config.BaseURL)
+	if err != nil {
+		return nil, err
+	}
+	// Copy baseURL by value so the two endpoint URLs do not alias each other.
+	ssoURL := *baseURL
+	ssoURL.Path += "/sso"
+	metadataURL := *baseURL
+	metadataURL.Path += "/metadata"
+	svc := fmt.Sprintf("%s%s", baseURL.Host, baseURL.Path)
+	if !strings.HasSuffix(svc, "/") {
+		svc += "/"
+	}
+
+	users, err := newUserFileBackend(config.UsersFile)
+	if err != nil {
+		return nil, err
+	}
+
+	idp := &saml.IdentityProvider{
+		Key:                     cert.PrivateKey,
+		Certificate:             cert.Leaf,
+		Logger:                  logger.DefaultLogger,
+		SSOURL:                  ssoURL,
+		ServiceProviderProvider: config,
+		SessionProvider:         users,
+	}
+	h := idp.Handler()
+
+	root := mux.NewRouter()
+	root.Handle(ssoURL.Path, w.Wrap(h, svc, nil))
+	root.Handle(metadataURL.Path, h)
+	return root, nil
+}
+
+func randomBytes(n int) []byte {
+	b := make([]byte, n)
+	if _, err := rand.Read(b); err != nil {
+		panic(err)
+	}
+	return b
+}
+
+var sessionMaxAge = 300 * time.Second
diff --git a/vendor/github.com/beevik/etree/CONTRIBUTORS b/vendor/github.com/beevik/etree/CONTRIBUTORS
new file mode 100644
index 0000000000000000000000000000000000000000..084662c3a8352374a8d75507c6cc511e1ac7179e
--- /dev/null
+++ b/vendor/github.com/beevik/etree/CONTRIBUTORS
@@ -0,0 +1,8 @@
+Brett Vickers (beevik)
+Felix Geisendörfer (felixge)
+Kamil Kisiel (kisielk)
+Graham King (grahamking)
+Matt Smith (ma314smith)
+Michal Jemala (michaljemala)
+Nicolas Piganeau (npiganeau)
+Chris Brown (ccbrown)
diff --git a/vendor/github.com/beevik/etree/LICENSE b/vendor/github.com/beevik/etree/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..e14ad682a0d305f4769218b03d68c42e66eaec64
--- /dev/null
+++ b/vendor/github.com/beevik/etree/LICENSE
@@ -0,0 +1,24 @@
+Copyright 2015 Brett Vickers. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/beevik/etree/README.md b/vendor/github.com/beevik/etree/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..28558433c80bd2edd0cfec445beeaf45c29e053a
--- /dev/null
+++ b/vendor/github.com/beevik/etree/README.md
@@ -0,0 +1,203 @@
+[![Build Status](https://travis-ci.org/beevik/etree.svg?branch=master)](https://travis-ci.org/beevik/etree)
+[![GoDoc](https://godoc.org/github.com/beevik/etree?status.svg)](https://godoc.org/github.com/beevik/etree)
+
+etree
+=====
+
+The etree package is a lightweight, pure go package that expresses XML in
+the form of an element tree.  Its design was inspired by the Python
+[ElementTree](http://docs.python.org/2/library/xml.etree.elementtree.html)
+module. Some of the package's features include:
+
+* Represents XML documents as trees of elements for easy traversal.
+* Imports, serializes, modifies or creates XML documents from scratch.
+* Writes and reads XML to/from files, byte slices, strings and io interfaces.
+* Performs simple or complex searches with lightweight XPath-like query APIs.
+* Auto-indents XML using spaces or tabs for better readability.
+* Implemented in pure go; depends only on standard go libraries.
+* Built on top of the go [encoding/xml](http://golang.org/pkg/encoding/xml)
+  package.
+
+### Creating an XML document
+
+The following example creates an XML document from scratch using the etree
+package and outputs its indented contents to stdout.
+```go
+doc := etree.NewDocument()
+doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
+doc.CreateProcInst("xml-stylesheet", `type="text/xsl" href="style.xsl"`)
+
+people := doc.CreateElement("People")
+people.CreateComment("These are all known people")
+
+jon := people.CreateElement("Person")
+jon.CreateAttr("name", "Jon")
+
+sally := people.CreateElement("Person")
+sally.CreateAttr("name", "Sally")
+
+doc.Indent(2)
+doc.WriteTo(os.Stdout)
+```
+
+Output:
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="style.xsl"?>
+<People>
+  <!--These are all known people-->
+  <Person name="Jon"/>
+  <Person name="Sally"/>
+</People>
+```
+
+### Reading an XML file
+
+Suppose you have a file on disk called `bookstore.xml` containing the
+following data:
+
+```xml
+<bookstore xmlns:p="urn:schemas-books-com:prices">
+
+  <book category="COOKING">
+    <title lang="en">Everyday Italian</title>
+    <author>Giada De Laurentiis</author>
+    <year>2005</year>
+    <p:price>30.00</p:price>
+  </book>
+
+  <book category="CHILDREN">
+    <title lang="en">Harry Potter</title>
+    <author>J K. Rowling</author>
+    <year>2005</year>
+    <p:price>29.99</p:price>
+  </book>
+
+  <book category="WEB">
+    <title lang="en">XQuery Kick Start</title>
+    <author>James McGovern</author>
+    <author>Per Bothner</author>
+    <author>Kurt Cagle</author>
+    <author>James Linn</author>
+    <author>Vaidyanathan Nagarajan</author>
+    <year>2003</year>
+    <p:price>49.99</p:price>
+  </book>
+
+  <book category="WEB">
+    <title lang="en">Learning XML</title>
+    <author>Erik T. Ray</author>
+    <year>2003</year>
+    <p:price>39.95</p:price>
+  </book>
+
+</bookstore>
+```
+
+This code reads the file's contents into an etree document.
+```go
+doc := etree.NewDocument()
+if err := doc.ReadFromFile("bookstore.xml"); err != nil {
+    panic(err)
+}
+```
+
+You can also read XML from a string, a byte slice, or an `io.Reader`.
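+
+For example, a minimal sketch that parses the People snippet from the first
+example out of a string:
+```go
+doc := etree.NewDocument()
+if err := doc.ReadFromString("<People><Person name='Jon'/></People>"); err != nil {
+    panic(err)
+}
+```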
+
+### Processing elements and attributes
+
+This example illustrates several ways to access elements and attributes using
+etree selection queries.
+```go
+root := doc.SelectElement("bookstore")
+fmt.Println("ROOT element:", root.Tag)
+
+for _, book := range root.SelectElements("book") {
+    fmt.Println("CHILD element:", book.Tag)
+    if title := book.SelectElement("title"); title != nil {
+        lang := title.SelectAttrValue("lang", "unknown")
+        fmt.Printf("  TITLE: %s (%s)\n", title.Text(), lang)
+    }
+    for _, attr := range book.Attr {
+        fmt.Printf("  ATTR: %s=%s\n", attr.Key, attr.Value)
+    }
+}
+```
+Output:
+```
+ROOT element: bookstore
+CHILD element: book
+  TITLE: Everyday Italian (en)
+  ATTR: category=COOKING
+CHILD element: book
+  TITLE: Harry Potter (en)
+  ATTR: category=CHILDREN
+CHILD element: book
+  TITLE: XQuery Kick Start (en)
+  ATTR: category=WEB
+CHILD element: book
+  TITLE: Learning XML (en)
+  ATTR: category=WEB
+```
+
+### Path queries
+
+This example uses etree's path functions to select all book titles that fall
+into the category of 'WEB'.  The double-slash prefix in the path causes the
+search for book elements to occur recursively; book elements may appear at any
+level of the XML hierarchy.
+```go
+for _, t := range doc.FindElements("//book[@category='WEB']/title") {
+    fmt.Println("Title:", t.Text())
+}
+```
+
+Output:
+```
+Title: XQuery Kick Start
+Title: Learning XML
+```
+
+This example finds the first book element under the root bookstore element and
+outputs the tag and text of each of its child elements.
+```go
+for _, e := range doc.FindElements("./bookstore/book[1]/*") {
+    fmt.Printf("%s: %s\n", e.Tag, e.Text())
+}
+```
+
+Output:
+```
+title: Everyday Italian
+author: Giada De Laurentiis
+year: 2005
+price: 30.00
+```
+
+This example finds all books with a price of 49.99 and outputs their titles.
+```go
+path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title")
+for _, e := range doc.FindElementsPath(path) {
+    fmt.Println(e.Text())
+}
+```
+
+Output:
+```
+XQuery Kick Start
+```
+
+Note that this example uses the FindElementsPath function, which takes as an
+argument a pre-compiled path object. Use precompiled paths when you plan to
+search with the same path more than once.
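+
+A small sketch of that pattern, compiling the path once and querying it
+repeatedly (the `docs` slice of documents is assumed):
+```go
+titlePath := etree.MustCompilePath("./bookstore/book/title")
+for _, d := range docs {
+    for _, t := range d.FindElementsPath(titlePath) {
+        fmt.Println("Title:", t.Text())
+    }
+}
+```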
+
+### Other features
+
+These are just a few examples of the things the etree package can do. See the
+[documentation](http://godoc.org/github.com/beevik/etree) for a complete
+description of its capabilities.
+
+### Contributing
+
+This project accepts contributions. Just fork the repo and submit a pull
+request!
diff --git a/vendor/github.com/beevik/etree/etree.go b/vendor/github.com/beevik/etree/etree.go
new file mode 100644
index 0000000000000000000000000000000000000000..36b279f60039f21a251bc58bd8c1343566f4fef5
--- /dev/null
+++ b/vendor/github.com/beevik/etree/etree.go
@@ -0,0 +1,943 @@
+// Copyright 2015 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package etree provides XML services through an Element Tree
+// abstraction.
+package etree
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"io"
+	"os"
+	"strings"
+)
+
+const (
+	// NoIndent is used with Indent to disable all indenting.
+	NoIndent = -1
+)
+
+// ErrXML is returned when XML parsing fails due to incorrect formatting.
+var ErrXML = errors.New("etree: invalid XML format")
+
+// ReadSettings allow for changing the default behavior of the ReadFrom*
+// methods.
+type ReadSettings struct {
+	// CharsetReader to be passed to standard xml.Decoder. Default: nil.
+	CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+	// Permissive allows input containing common mistakes such as missing tags
+	// or attribute values. Default: false.
+	Permissive bool
+}
+
+// newReadSettings creates a default ReadSettings record.
+func newReadSettings() ReadSettings {
+	return ReadSettings{}
+}
+
+// WriteSettings allow for changing the serialization behavior of the WriteTo*
+// methods.
+type WriteSettings struct {
+	// CanonicalEndTags forces the production of XML end tags, even for
+	// elements that have no child elements. Default: false.
+	CanonicalEndTags bool
+
+	// CanonicalText forces the production of XML character references for
+	// text data characters &, <, and >. If false, XML character references
+	// are also produced for " and '. Default: false.
+	CanonicalText bool
+
+	// CanonicalAttrVal forces the production of XML character references for
+	// attribute value characters &, < and ". If false, XML character
+	// references are also produced for > and '. Default: false.
+	CanonicalAttrVal bool
+}
+
+// newWriteSettings creates a default WriteSettings record.
+func newWriteSettings() WriteSettings {
+	return WriteSettings{
+		CanonicalEndTags: false,
+		CanonicalText:    false,
+		CanonicalAttrVal: false,
+	}
+}
+
+// A Token is an empty interface that represents an Element, CharData,
+// Comment, Directive, or ProcInst.
+type Token interface {
+	Parent() *Element
+	dup(parent *Element) Token
+	setParent(parent *Element)
+	writeTo(w *bufio.Writer, s *WriteSettings)
+}
+
+// A Document is a container holding a complete XML hierarchy. Its embedded
+// element contains zero or more children, one of which is usually the root
+// element.  The embedded element may include other children such as
+// processing instructions or BOM CharData tokens.
+type Document struct {
+	Element
+	ReadSettings  ReadSettings
+	WriteSettings WriteSettings
+}
+
+// An Element represents an XML element, its attributes, and its child tokens.
+type Element struct {
+	Space, Tag string   // namespace and tag
+	Attr       []Attr   // key-value attribute pairs
+	Child      []Token  // child tokens (elements, comments, etc.)
+	parent     *Element // parent element
+}
+
+// An Attr represents a key-value attribute of an XML element.
+type Attr struct {
+	Space, Key string // The attribute's namespace and key
+	Value      string // The attribute value string
+}
+
+// CharData represents character data within XML.
+type CharData struct {
+	Data       string
+	parent     *Element
+	whitespace bool
+}
+
+// A Comment represents an XML comment.
+type Comment struct {
+	Data   string
+	parent *Element
+}
+
+// A Directive represents an XML directive.
+type Directive struct {
+	Data   string
+	parent *Element
+}
+
+// A ProcInst represents an XML processing instruction.
+type ProcInst struct {
+	Target string
+	Inst   string
+	parent *Element
+}
+
+// NewDocument creates an XML document without a root element.
+func NewDocument() *Document {
+	return &Document{
+		Element{Child: make([]Token, 0)},
+		newReadSettings(),
+		newWriteSettings(),
+	}
+}
+
+// Copy returns a recursive, deep copy of the document.
+func (d *Document) Copy() *Document {
+	return &Document{*(d.dup(nil).(*Element)), d.ReadSettings, d.WriteSettings}
+}
+
+// Root returns the root element of the document, or nil if there is no root
+// element.
+func (d *Document) Root() *Element {
+	for _, t := range d.Child {
+		if c, ok := t.(*Element); ok {
+			return c
+		}
+	}
+	return nil
+}
+
+// SetRoot replaces the document's root element with e. If the document
+// already has a root when this function is called, then the document's
+// original root is unbound first. If the element e is bound to another
+// document (or to another element within a document), then it is unbound
+// first.
+func (d *Document) SetRoot(e *Element) {
+	if e.parent != nil {
+		e.parent.RemoveChild(e)
+	}
+	e.setParent(&d.Element)
+
+	for i, t := range d.Child {
+		if _, ok := t.(*Element); ok {
+			t.setParent(nil)
+			d.Child[i] = e
+			return
+		}
+	}
+	d.Child = append(d.Child, e)
+}
+
+// ReadFrom reads XML from the reader r into the document d. It returns the
+// number of bytes read and any error encountered.
+func (d *Document) ReadFrom(r io.Reader) (n int64, err error) {
+	return d.Element.readFrom(r, d.ReadSettings)
+}
+
+// ReadFromFile reads XML from the file named filename into the document d.
+func (d *Document) ReadFromFile(filename string) error {
+	f, err := os.Open(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = d.ReadFrom(f)
+	return err
+}
+
+// ReadFromBytes reads XML from the byte slice b into the document d.
+func (d *Document) ReadFromBytes(b []byte) error {
+	_, err := d.ReadFrom(bytes.NewReader(b))
+	return err
+}
+
+// ReadFromString reads XML from the string s into the document d.
+func (d *Document) ReadFromString(s string) error {
+	_, err := d.ReadFrom(strings.NewReader(s))
+	return err
+}
+
+// WriteTo serializes an XML document into the writer w. It
+// returns the number of bytes written and any error encountered.
+func (d *Document) WriteTo(w io.Writer) (n int64, err error) {
+	cw := newCountWriter(w)
+	b := bufio.NewWriter(cw)
+	for _, c := range d.Child {
+		c.writeTo(b, &d.WriteSettings)
+	}
+	err, n = b.Flush(), cw.bytes
+	return
+}
+
+// WriteToFile serializes an XML document into the file named
+// filename.
+func (d *Document) WriteToFile(filename string) error {
+	f, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = d.WriteTo(f)
+	return err
+}
+
+// WriteToBytes serializes the XML document into a slice of
+// bytes.
+func (d *Document) WriteToBytes() (b []byte, err error) {
+	var buf bytes.Buffer
+	if _, err = d.WriteTo(&buf); err != nil {
+		return
+	}
+	return buf.Bytes(), nil
+}
+
+// WriteToString serializes the XML document into a string.
+func (d *Document) WriteToString() (s string, err error) {
+	var b []byte
+	if b, err = d.WriteToBytes(); err != nil {
+		return
+	}
+	return string(b), nil
+}
+
+type indentFunc func(depth int) string
+
+// Indent modifies the document's element tree by inserting CharData entities
+// containing carriage returns and indentation. The amount of indentation per
+// depth level is given as spaces. Pass etree.NoIndent for spaces if you want
+// no indentation at all.
+func (d *Document) Indent(spaces int) {
+	var indent indentFunc
+	switch {
+	case spaces < 0:
+		indent = func(depth int) string { return "" }
+	default:
+		indent = func(depth int) string { return crIndent(depth*spaces, crsp) }
+	}
+	d.Element.indent(0, indent)
+}
+
+// IndentTabs modifies the document's element tree by inserting CharData
+// entities containing carriage returns and tabs for indentation.  One tab is
+// used per indentation level.
+func (d *Document) IndentTabs() {
+	indent := func(depth int) string { return crIndent(depth, crtab) }
+	d.Element.indent(0, indent)
+}
+
+// NewElement creates an unparented element with the specified tag. The tag
+// may be prefixed by a namespace and a colon.
+func NewElement(tag string) *Element {
+	space, stag := spaceDecompose(tag)
+	return newElement(space, stag, nil)
+}
+
+// newElement is a helper function that creates an element and binds it to
+// a parent element if possible.
+func newElement(space, tag string, parent *Element) *Element {
+	e := &Element{
+		Space:  space,
+		Tag:    tag,
+		Attr:   make([]Attr, 0),
+		Child:  make([]Token, 0),
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(e)
+	}
+	return e
+}
+
+// Copy creates a recursive, deep copy of the element and all its attributes
+// and children. The returned element has no parent but can be parented to
+// another element using AddChild, or to a document using SetRoot.
+func (e *Element) Copy() *Element {
+	var parent *Element
+	return e.dup(parent).(*Element)
+}
+
+// Text returns the characters immediately following the element's
+// opening tag.
+func (e *Element) Text() string {
+	if len(e.Child) == 0 {
+		return ""
+	}
+	if cd, ok := e.Child[0].(*CharData); ok {
+		return cd.Data
+	}
+	return ""
+}
+
+// SetText replaces an element's subsidiary CharData text with a new string.
+func (e *Element) SetText(text string) {
+	if len(e.Child) > 0 {
+		if cd, ok := e.Child[0].(*CharData); ok {
+			cd.Data = text
+			return
+		}
+	}
+	cd := newCharData(text, false, e)
+	copy(e.Child[1:], e.Child[0:])
+	e.Child[0] = cd
+}
+
+// CreateElement creates an element with the specified tag and adds it as the
+// last child element of the element e. The tag may be prefixed by a namespace
+// and a colon.
+func (e *Element) CreateElement(tag string) *Element {
+	space, stag := spaceDecompose(tag)
+	return newElement(space, stag, e)
+}
+
+// AddChild adds the token t as the last child of element e. If token t was
+// already the child of another element, it is first removed from its current
+// parent element.
+func (e *Element) AddChild(t Token) {
+	if t.Parent() != nil {
+		t.Parent().RemoveChild(t)
+	}
+	t.setParent(e)
+	e.addChild(t)
+}
+
+// InsertChild inserts the token t before e's existing child token ex. If ex
+// is nil (or if ex is not a child of e), then t is added to the end of e's
+// child token list. If token t was already the child of another element, it
+// is first removed from its current parent element.
+func (e *Element) InsertChild(ex Token, t Token) {
+	if t.Parent() != nil {
+		t.Parent().RemoveChild(t)
+	}
+	t.setParent(e)
+
+	for i, c := range e.Child {
+		if c == ex {
+			e.Child = append(e.Child, nil)
+			copy(e.Child[i+1:], e.Child[i:])
+			e.Child[i] = t
+			return
+		}
+	}
+	e.addChild(t)
+}
+
+// RemoveChild attempts to remove the token t from element e's list of
+// children. If the token t is a child of e, then it is returned. Otherwise,
+// nil is returned.
+func (e *Element) RemoveChild(t Token) Token {
+	for i, c := range e.Child {
+		if c == t {
+			e.Child = append(e.Child[:i], e.Child[i+1:]...)
+			c.setParent(nil)
+			return t
+		}
+	}
+	return nil
+}
+
+// readFrom reads XML from the reader ri and stores the result as new
+// children of element e.
+func (e *Element) readFrom(ri io.Reader, settings ReadSettings) (n int64, err error) {
+	r := newCountReader(ri)
+	dec := xml.NewDecoder(r)
+	dec.CharsetReader = settings.CharsetReader
+	dec.Strict = !settings.Permissive
+	var stack stack
+	stack.push(e)
+	for {
+		t, err := dec.RawToken()
+		switch {
+		case err == io.EOF:
+			return r.bytes, nil
+		case err != nil:
+			return r.bytes, err
+		case stack.empty():
+			return r.bytes, ErrXML
+		}
+
+		top := stack.peek().(*Element)
+
+		switch t := t.(type) {
+		case xml.StartElement:
+			e := newElement(t.Name.Space, t.Name.Local, top)
+			for _, a := range t.Attr {
+				e.createAttr(a.Name.Space, a.Name.Local, a.Value)
+			}
+			stack.push(e)
+		case xml.EndElement:
+			stack.pop()
+		case xml.CharData:
+			data := string(t)
+			newCharData(data, isWhitespace(data), top)
+		case xml.Comment:
+			newComment(string(t), top)
+		case xml.Directive:
+			newDirective(string(t), top)
+		case xml.ProcInst:
+			newProcInst(t.Target, string(t.Inst), top)
+		}
+	}
+}
+
+// SelectAttr finds an element attribute matching the requested key and
+// returns it if found. The key may be prefixed by a namespace and a colon.
+func (e *Element) SelectAttr(key string) *Attr {
+	space, skey := spaceDecompose(key)
+	for i, a := range e.Attr {
+		if spaceMatch(space, a.Space) && skey == a.Key {
+			return &e.Attr[i]
+		}
+	}
+	return nil
+}
+
+// SelectAttrValue finds an element attribute matching the requested key and
+// returns its value if found. The key may be prefixed by a namespace and a
+// colon. If the key is not found, the dflt value is returned instead.
+func (e *Element) SelectAttrValue(key, dflt string) string {
+	space, skey := spaceDecompose(key)
+	for _, a := range e.Attr {
+		if spaceMatch(space, a.Space) && skey == a.Key {
+			return a.Value
+		}
+	}
+	return dflt
+}
+
+// ChildElements returns all elements that are children of element e.
+func (e *Element) ChildElements() []*Element {
+	var elements []*Element
+	for _, t := range e.Child {
+		if c, ok := t.(*Element); ok {
+			elements = append(elements, c)
+		}
+	}
+	return elements
+}
+
+// SelectElement returns the first child element with the given tag. The tag
+// may be prefixed by a namespace and a colon.
+func (e *Element) SelectElement(tag string) *Element {
+	space, stag := spaceDecompose(tag)
+	for _, t := range e.Child {
+		if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag {
+			return c
+		}
+	}
+	return nil
+}
+
+// SelectElements returns a slice of all child elements with the given tag.
+// The tag may be prefixed by a namespace and a colon.
+func (e *Element) SelectElements(tag string) []*Element {
+	space, stag := spaceDecompose(tag)
+	var elements []*Element
+	for _, t := range e.Child {
+		if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag {
+			elements = append(elements, c)
+		}
+	}
+	return elements
+}
+
+// FindElement returns the first element matched by the XPath-like path
+// string. Panics if an invalid path string is supplied.
+func (e *Element) FindElement(path string) *Element {
+	return e.FindElementPath(MustCompilePath(path))
+}
+
+// FindElementPath returns the first element matched by the XPath-like path
+// string.
+func (e *Element) FindElementPath(path Path) *Element {
+	p := newPather()
+	elements := p.traverse(e, path)
+	switch {
+	case len(elements) > 0:
+		return elements[0]
+	default:
+		return nil
+	}
+}
+
+// FindElements returns a slice of elements matched by the XPath-like path
+// string. Panics if an invalid path string is supplied.
+func (e *Element) FindElements(path string) []*Element {
+	return e.FindElementsPath(MustCompilePath(path))
+}
+
+// FindElementsPath returns a slice of elements matched by the Path object.
+func (e *Element) FindElementsPath(path Path) []*Element {
+	p := newPather()
+	return p.traverse(e, path)
+}
+
+// indent recursively inserts proper indentation between an
+// XML element's child tokens.
+func (e *Element) indent(depth int, indent indentFunc) {
+	e.stripIndent()
+	n := len(e.Child)
+	if n == 0 {
+		return
+	}
+
+	oldChild := e.Child
+	e.Child = make([]Token, 0, n*2+1)
+	isCharData, firstNonCharData := false, true
+	for _, c := range oldChild {
+
+		// Insert CR+indent before child if it's not character data.
+		// Exceptions: when it's the first non-character-data child, or when
+		// the child is at root depth.
+		_, isCharData = c.(*CharData)
+		if !isCharData {
+			if !firstNonCharData || depth > 0 {
+				newCharData(indent(depth), true, e)
+			}
+			firstNonCharData = false
+		}
+
+		e.addChild(c)
+
+		// Recursively process child elements.
+		if ce, ok := c.(*Element); ok {
+			ce.indent(depth+1, indent)
+		}
+	}
+
+	// Insert CR+indent before the last child.
+	if !isCharData {
+		if !firstNonCharData || depth > 0 {
+			newCharData(indent(depth-1), true, e)
+		}
+	}
+}
+
+// stripIndent removes any previously inserted indentation.
+func (e *Element) stripIndent() {
+	// Count the number of non-indent child tokens
+	n := len(e.Child)
+	for _, c := range e.Child {
+		if cd, ok := c.(*CharData); ok && cd.whitespace {
+			n--
+		}
+	}
+	if n == len(e.Child) {
+		return
+	}
+
+	// Strip out indent CharData
+	newChild := make([]Token, n)
+	j := 0
+	for _, c := range e.Child {
+		if cd, ok := c.(*CharData); ok && cd.whitespace {
+			continue
+		}
+		newChild[j] = c
+		j++
+	}
+	e.Child = newChild
+}
+
+// dup duplicates the element.
+func (e *Element) dup(parent *Element) Token {
+	ne := &Element{
+		Space:  e.Space,
+		Tag:    e.Tag,
+		Attr:   make([]Attr, len(e.Attr)),
+		Child:  make([]Token, len(e.Child)),
+		parent: parent,
+	}
+	for i, t := range e.Child {
+		ne.Child[i] = t.dup(ne)
+	}
+	for i, a := range e.Attr {
+		ne.Attr[i] = a
+	}
+	return ne
+}
+
+// Parent returns the element token's parent element, or nil if it has no
+// parent.
+func (e *Element) Parent() *Element {
+	return e.parent
+}
+
+// setParent replaces the element token's parent.
+func (e *Element) setParent(parent *Element) {
+	e.parent = parent
+}
+
+// writeTo serializes the element to the writer w.
+func (e *Element) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteByte('<')
+	if e.Space != "" {
+		w.WriteString(e.Space)
+		w.WriteByte(':')
+	}
+	w.WriteString(e.Tag)
+	for _, a := range e.Attr {
+		w.WriteByte(' ')
+		a.writeTo(w, s)
+	}
+	if len(e.Child) > 0 {
+		w.WriteString(">")
+		for _, c := range e.Child {
+			c.writeTo(w, s)
+		}
+		w.Write([]byte{'<', '/'})
+		if e.Space != "" {
+			w.WriteString(e.Space)
+			w.WriteByte(':')
+		}
+		w.WriteString(e.Tag)
+		w.WriteByte('>')
+	} else {
+		if s.CanonicalEndTags {
+			w.Write([]byte{'>', '<', '/'})
+			if e.Space != "" {
+				w.WriteString(e.Space)
+				w.WriteByte(':')
+			}
+			w.WriteString(e.Tag)
+			w.WriteByte('>')
+		} else {
+			w.Write([]byte{'/', '>'})
+		}
+	}
+}
+
+// addChild adds a child token to the element e.
+func (e *Element) addChild(t Token) {
+	e.Child = append(e.Child, t)
+}
+
+// CreateAttr creates an attribute and adds it to element e. The key may be
+// prefixed by a namespace and a colon. If an attribute with the key already
+// exists, its value is replaced.
+func (e *Element) CreateAttr(key, value string) *Attr {
+	space, skey := spaceDecompose(key)
+	return e.createAttr(space, skey, value)
+}
+
+// createAttr is a helper function that creates attributes.
+func (e *Element) createAttr(space, key, value string) *Attr {
+	for i, a := range e.Attr {
+		if space == a.Space && key == a.Key {
+			e.Attr[i].Value = value
+			return &e.Attr[i]
+		}
+	}
+	a := Attr{space, key, value}
+	e.Attr = append(e.Attr, a)
+	return &e.Attr[len(e.Attr)-1]
+}
+
+// RemoveAttr removes and returns the first attribute of the element whose key
+// matches the given key. The key may be prefixed by a namespace and a colon.
+// If an equal attribute does not exist, nil is returned.
+func (e *Element) RemoveAttr(key string) *Attr {
+	space, skey := spaceDecompose(key)
+	for i, a := range e.Attr {
+		if space == a.Space && skey == a.Key {
+			e.Attr = append(e.Attr[0:i], e.Attr[i+1:]...)
+			return &a
+		}
+	}
+	return nil
+}
+
+var xmlReplacerNormal = strings.NewReplacer(
+	"&", "&amp;",
+	"<", "&lt;",
+	">", "&gt;",
+	"'", "&apos;",
+	`"`, "&quot;",
+)
+
+var xmlReplacerCanonicalText = strings.NewReplacer(
+	"&", "&amp;",
+	"<", "&lt;",
+	">", "&gt;",
+	"\r", "&#xD;",
+)
+
+var xmlReplacerCanonicalAttrVal = strings.NewReplacer(
+	"&", "&amp;",
+	"<", "&lt;",
+	`"`, "&quot;",
+	"\t", "&#x9;",
+	"\n", "&#xA;",
+	"\r", "&#xD;",
+)
+
+// writeTo serializes the attribute to the writer.
+func (a *Attr) writeTo(w *bufio.Writer, s *WriteSettings) {
+	if a.Space != "" {
+		w.WriteString(a.Space)
+		w.WriteByte(':')
+	}
+	w.WriteString(a.Key)
+	w.WriteString(`="`)
+	var r *strings.Replacer
+	if s.CanonicalAttrVal {
+		r = xmlReplacerCanonicalAttrVal
+	} else {
+		r = xmlReplacerNormal
+	}
+	w.WriteString(r.Replace(a.Value))
+	w.WriteByte('"')
+}
+
+// NewCharData creates a parentless XML character data entity.
+func NewCharData(data string) *CharData {
+	return newCharData(data, false, nil)
+}
+
+// newCharData creates an XML character data entity and binds it to a parent
+// element. If parent is nil, the CharData token remains unbound.
+func newCharData(data string, whitespace bool, parent *Element) *CharData {
+	c := &CharData{
+		Data:       data,
+		whitespace: whitespace,
+		parent:     parent,
+	}
+	if parent != nil {
+		parent.addChild(c)
+	}
+	return c
+}
+
+// CreateCharData creates an XML character data entity and adds it as a child
+// of element e.
+func (e *Element) CreateCharData(data string) *CharData {
+	return newCharData(data, false, e)
+}
+
+// dup duplicates the character data.
+func (c *CharData) dup(parent *Element) Token {
+	return &CharData{
+		Data:       c.Data,
+		whitespace: c.whitespace,
+		parent:     parent,
+	}
+}
+
+// Parent returns the character data token's parent element, or nil if it has
+// no parent.
+func (c *CharData) Parent() *Element {
+	return c.parent
+}
+
+// setParent replaces the character data token's parent.
+func (c *CharData) setParent(parent *Element) {
+	c.parent = parent
+}
+
+// writeTo serializes the character data entity to the writer.
+func (c *CharData) writeTo(w *bufio.Writer, s *WriteSettings) {
+	var r *strings.Replacer
+	if s.CanonicalText {
+		r = xmlReplacerCanonicalText
+	} else {
+		r = xmlReplacerNormal
+	}
+	w.WriteString(r.Replace(c.Data))
+}
+
+// NewComment creates a parentless XML comment.
+func NewComment(comment string) *Comment {
+	return newComment(comment, nil)
+}
+
+// newComment creates an XML comment and binds it to a parent element. If
+// parent is nil, the Comment remains unbound.
+func newComment(comment string, parent *Element) *Comment {
+	c := &Comment{
+		Data:   comment,
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(c)
+	}
+	return c
+}
+
+// CreateComment creates an XML comment and adds it as a child of element e.
+func (e *Element) CreateComment(comment string) *Comment {
+	return newComment(comment, e)
+}
+
+// dup duplicates the comment.
+func (c *Comment) dup(parent *Element) Token {
+	return &Comment{
+		Data:   c.Data,
+		parent: parent,
+	}
+}
+
+// Parent returns comment token's parent element, or nil if it has no parent.
+func (c *Comment) Parent() *Element {
+	return c.parent
+}
+
+// setParent replaces the comment token's parent.
+func (c *Comment) setParent(parent *Element) {
+	c.parent = parent
+}
+
+// writeTo serializes the comment to the writer.
+func (c *Comment) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteString("<!--")
+	w.WriteString(c.Data)
+	w.WriteString("-->")
+}
+
+// NewDirective creates a parentless XML directive.
+func NewDirective(data string) *Directive {
+	return newDirective(data, nil)
+}
+
+// newDirective creates an XML directive and binds it to a parent element. If
+// parent is nil, the Directive remains unbound.
+func newDirective(data string, parent *Element) *Directive {
+	d := &Directive{
+		Data:   data,
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(d)
+	}
+	return d
+}
+
+// CreateDirective creates an XML directive and adds it as the last child of
+// element e.
+func (e *Element) CreateDirective(data string) *Directive {
+	return newDirective(data, e)
+}
+
+// dup duplicates the directive.
+func (d *Directive) dup(parent *Element) Token {
+	return &Directive{
+		Data:   d.Data,
+		parent: parent,
+	}
+}
+
+// Parent returns directive token's parent element, or nil if it has no
+// parent.
+func (d *Directive) Parent() *Element {
+	return d.parent
+}
+
+// setParent replaces the directive token's parent.
+func (d *Directive) setParent(parent *Element) {
+	d.parent = parent
+}
+
+// writeTo serializes the XML directive to the writer.
+func (d *Directive) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteString("<!")
+	w.WriteString(d.Data)
+	w.WriteString(">")
+}
+
+// NewProcInst creates a parentless XML processing instruction.
+func NewProcInst(target, inst string) *ProcInst {
+	return newProcInst(target, inst, nil)
+}
+
+// newProcInst creates an XML processing instruction and binds it to a parent
+// element. If parent is nil, the ProcInst remains unbound.
+func newProcInst(target, inst string, parent *Element) *ProcInst {
+	p := &ProcInst{
+		Target: target,
+		Inst:   inst,
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(p)
+	}
+	return p
+}
+
+// CreateProcInst creates a processing instruction and adds it as a child of
+// element e.
+func (e *Element) CreateProcInst(target, inst string) *ProcInst {
+	return newProcInst(target, inst, e)
+}
+
+// dup duplicates the procinst.
+func (p *ProcInst) dup(parent *Element) Token {
+	return &ProcInst{
+		Target: p.Target,
+		Inst:   p.Inst,
+		parent: parent,
+	}
+}
+
+// Parent returns processing instruction token's parent element, or nil if it
+// has no parent.
+func (p *ProcInst) Parent() *Element {
+	return p.parent
+}
+
+// setParent replaces the processing instruction token's parent.
+func (p *ProcInst) setParent(parent *Element) {
+	p.parent = parent
+}
+
+// writeTo serializes the processing instruction to the writer.
+func (p *ProcInst) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteString("<?")
+	w.WriteString(p.Target)
+	if p.Inst != "" {
+		w.WriteByte(' ')
+		w.WriteString(p.Inst)
+	}
+	w.WriteString("?>")
+}
diff --git a/vendor/github.com/beevik/etree/helpers.go b/vendor/github.com/beevik/etree/helpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f8350e70c6be579afb8a09d22df838d0c07a157
--- /dev/null
+++ b/vendor/github.com/beevik/etree/helpers.go
@@ -0,0 +1,188 @@
+// Copyright 2015 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package etree
+
+import (
+	"io"
+	"strings"
+)
+
+// A simple stack
+type stack struct {
+	data []interface{}
+}
+
+func (s *stack) empty() bool {
+	return len(s.data) == 0
+}
+
+func (s *stack) push(value interface{}) {
+	s.data = append(s.data, value)
+}
+
+func (s *stack) pop() interface{} {
+	value := s.data[len(s.data)-1]
+	s.data[len(s.data)-1] = nil
+	s.data = s.data[:len(s.data)-1]
+	return value
+}
+
+func (s *stack) peek() interface{} {
+	return s.data[len(s.data)-1]
+}
+
+// A fifo is a simple first-in-first-out queue.
+type fifo struct {
+	data       []interface{}
+	head, tail int
+}
+
+func (f *fifo) add(value interface{}) {
+	if f.len()+1 >= len(f.data) {
+		f.grow()
+	}
+	f.data[f.tail] = value
+	if f.tail++; f.tail == len(f.data) {
+		f.tail = 0
+	}
+}
+
+func (f *fifo) remove() interface{} {
+	value := f.data[f.head]
+	f.data[f.head] = nil
+	if f.head++; f.head == len(f.data) {
+		f.head = 0
+	}
+	return value
+}
+
+func (f *fifo) len() int {
+	if f.tail >= f.head {
+		return f.tail - f.head
+	}
+	return len(f.data) - f.head + f.tail
+}
+
+func (f *fifo) grow() {
+	c := len(f.data) * 2
+	if c == 0 {
+		c = 4
+	}
+	buf, count := make([]interface{}, c), f.len()
+	if f.tail >= f.head {
+		copy(buf[0:count], f.data[f.head:f.tail])
+	} else {
+		hindex := len(f.data) - f.head
+		copy(buf[0:hindex], f.data[f.head:])
+		copy(buf[hindex:count], f.data[:f.tail])
+	}
+	f.data, f.head, f.tail = buf, 0, count
+}
+
+// countReader implements a proxy reader that counts the number of
+// bytes read from its encapsulated reader.
+type countReader struct {
+	r     io.Reader
+	bytes int64
+}
+
+func newCountReader(r io.Reader) *countReader {
+	return &countReader{r: r}
+}
+
+func (cr *countReader) Read(p []byte) (n int, err error) {
+	b, err := cr.r.Read(p)
+	cr.bytes += int64(b)
+	return b, err
+}
+
+// countWriter implements a proxy writer that counts the number of
+// bytes written by its encapsulated writer.
+type countWriter struct {
+	w     io.Writer
+	bytes int64
+}
+
+func newCountWriter(w io.Writer) *countWriter {
+	return &countWriter{w: w}
+}
+
+func (cw *countWriter) Write(p []byte) (n int, err error) {
+	b, err := cw.w.Write(p)
+	cw.bytes += int64(b)
+	return b, err
+}
+
+// isWhitespace returns true if the byte slice contains only
+// whitespace characters.
+func isWhitespace(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' {
+			return false
+		}
+	}
+	return true
+}
+
+// spaceMatch returns true if namespace a is the empty string
+// or if namespace a equals namespace b.
+func spaceMatch(a, b string) bool {
+	switch {
+	case a == "":
+		return true
+	default:
+		return a == b
+	}
+}
+
+// spaceDecompose breaks a namespace:tag identifier at the ':'
+// and returns the two parts.
+func spaceDecompose(str string) (space, key string) {
+	colon := strings.IndexByte(str, ':')
+	if colon == -1 {
+		return "", str
+	}
+	return str[:colon], str[colon+1:]
+}
+
+// Strings used by crIndent
+const (
+	crsp  = "\n                                                                "
+	crtab = "\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
+)
+
+// crIndent returns a carriage return followed by n copies of the
+// first non-CR character in the source string.
+func crIndent(n int, source string) string {
+	switch {
+	case n < 0:
+		return source[:1]
+	case n < len(source):
+		return source[:n+1]
+	default:
+		return source + strings.Repeat(source[1:2], n-len(source)+1)
+	}
+}
+
+// nextIndex returns the index of the next occurrence of sep in s,
+// starting from offset.  It returns -1 if the sep string is not found.
+func nextIndex(s, sep string, offset int) int {
+	switch i := strings.Index(s[offset:], sep); i {
+	case -1:
+		return -1
+	default:
+		return offset + i
+	}
+}
+
+// isInteger returns true if the string s contains an integer.
+func isInteger(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if (s[i] < '0' || s[i] > '9') && !(i == 0 && s[i] == '-') {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/beevik/etree/path.go b/vendor/github.com/beevik/etree/path.go
new file mode 100644
index 0000000000000000000000000000000000000000..9cf245eb39373b7bb3ca823031fb8d41c44f36b8
--- /dev/null
+++ b/vendor/github.com/beevik/etree/path.go
@@ -0,0 +1,516 @@
+// Copyright 2015 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package etree
+
+import (
+	"strconv"
+	"strings"
+)
+
+/*
+A Path is an object that represents an optimized version of an
+XPath-like search string.  Although path strings are XPath-like,
+only the following limited syntax is supported:
+
+    .               Selects the current element
+    ..              Selects the parent of the current element
+    *               Selects all child elements
+    //              Selects all descendants of the current element
+    tag             Selects all child elements with the given tag
+    [#]             Selects the element of the given index (1-based,
+                      negative starts from the end)
+    [@attrib]       Selects all elements with the given attribute
+    [@attrib='val'] Selects all elements with the given attribute set to val
+    [tag]           Selects all elements with a child element named tag
+    [tag='val']     Selects all elements with a child element named tag
+                      and text matching val
+    [text()]        Selects all elements with non-empty text
+    [text()='val']  Selects all elements whose text matches val
+
+Examples:
+
+Select the title elements of all descendant book elements having a
+'category' attribute of 'WEB':
+    //book[@category='WEB']/title
+
+Select the first book element with a title child containing the text
+'Great Expectations':
+    .//book[title='Great Expectations'][1]
+
+Starting from the current element, select all children of book elements
+with an attribute 'language' set to 'english':
+    ./book/*[@language='english']
+
+Starting from the current element, select all children of book elements
+containing the text 'special':
+    ./book/*[text()='special']
+
+Select all descendant book elements whose title element has an attribute
+'language' set to 'french':
+    //book/title[@language='french']/..
+
+*/
+type Path struct {
+	segments []segment
+}
+
+// ErrPath is returned by path functions when an invalid etree path is provided.
+type ErrPath string
+
+// Error returns the string describing a path error.
+func (err ErrPath) Error() string {
+	return "etree: " + string(err)
+}
+
+// CompilePath creates an optimized version of an XPath-like string that
+// can be used to query elements in an element tree.
+func CompilePath(path string) (Path, error) {
+	var comp compiler
+	segments := comp.parsePath(path)
+	if comp.err != ErrPath("") {
+		return Path{nil}, comp.err
+	}
+	return Path{segments}, nil
+}
+
+// MustCompilePath creates an optimized version of an XPath-like string that
+// can be used to query elements in an element tree.  Panics if an error
+// occurs.  Use this function to create Paths when you know the path is
+// valid (i.e., if it's hard-coded).
+func MustCompilePath(path string) Path {
+	p, err := CompilePath(path)
+	if err != nil {
+		panic(err)
+	}
+	return p
+}
+
+// A segment is a portion of a path between "/" characters.
+// It contains one selector and zero or more [filters].
+type segment struct {
+	sel     selector
+	filters []filter
+}
+
+func (seg *segment) apply(e *Element, p *pather) {
+	seg.sel.apply(e, p)
+	for _, f := range seg.filters {
+		f.apply(p)
+	}
+}
+
+// A selector selects XML elements for consideration by the
+// path traversal.
+type selector interface {
+	apply(e *Element, p *pather)
+}
+
+// A filter pares down a list of candidate XML elements based
+// on a path filter in [brackets].
+type filter interface {
+	apply(p *pather)
+}
+
+// A pather is a helper object that traverses an element tree using
+// a Path object.  It collects and deduplicates all elements matching
+// the path query.
+type pather struct {
+	queue      fifo
+	results    []*Element
+	inResults  map[*Element]bool
+	candidates []*Element
+	scratch    []*Element // used by filters
+}
+
+// A node represents an element and the remaining path segments that
+// should be applied against it by the pather.
+type node struct {
+	e        *Element
+	segments []segment
+}
+
+func newPather() *pather {
+	return &pather{
+		results:    make([]*Element, 0),
+		inResults:  make(map[*Element]bool),
+		candidates: make([]*Element, 0),
+		scratch:    make([]*Element, 0),
+	}
+}
+
+// traverse follows the path from the element e, collecting
+// and then returning all elements that match the path's selectors
+// and filters.
+func (p *pather) traverse(e *Element, path Path) []*Element {
+	for p.queue.add(node{e, path.segments}); p.queue.len() > 0; {
+		p.eval(p.queue.remove().(node))
+	}
+	return p.results
+}
+
+// eval evaluates the current path node by applying the remaining
+// path's selector rules against the node's element.
+func (p *pather) eval(n node) {
+	p.candidates = p.candidates[0:0]
+	seg, remain := n.segments[0], n.segments[1:]
+	seg.apply(n.e, p)
+
+	if len(remain) == 0 {
+		for _, c := range p.candidates {
+			if in := p.inResults[c]; !in {
+				p.inResults[c] = true
+				p.results = append(p.results, c)
+			}
+		}
+	} else {
+		for _, c := range p.candidates {
+			p.queue.add(node{c, remain})
+		}
+	}
+}
+
+// A compiler generates a compiled path from a path string.
+type compiler struct {
+	err ErrPath
+}
+
+// parsePath parses an XPath-like string describing a path
+// through an element tree and returns a slice of segment
+// descriptors.
+func (c *compiler) parsePath(path string) []segment {
+	// If path starts or ends with //, fix it
+	if strings.HasPrefix(path, "//") {
+		path = "." + path
+	}
+	if strings.HasSuffix(path, "//") {
+		path = path + "*"
+	}
+
+	// Paths cannot be absolute
+	if strings.HasPrefix(path, "/") {
+		c.err = ErrPath("paths cannot be absolute.")
+		return nil
+	}
+
+	// Split path into segment objects
+	var segments []segment
+	for _, s := range splitPath(path) {
+		segments = append(segments, c.parseSegment(s))
+		if c.err != ErrPath("") {
+			break
+		}
+	}
+	return segments
+}
+
+func splitPath(path string) []string {
+	pieces := make([]string, 0)
+	start := 0
+	inquote := false
+	for i := 0; i+1 <= len(path); i++ {
+		if path[i] == '\'' {
+			inquote = !inquote
+		} else if path[i] == '/' && !inquote {
+			pieces = append(pieces, path[start:i])
+			start = i + 1
+		}
+	}
+	return append(pieces, path[start:])
+}
+
+// parseSegment parses a path segment between / characters.
+func (c *compiler) parseSegment(path string) segment {
+	pieces := strings.Split(path, "[")
+	seg := segment{
+		sel:     c.parseSelector(pieces[0]),
+		filters: make([]filter, 0),
+	}
+	for i := 1; i < len(pieces); i++ {
+		fpath := pieces[i]
+		if fpath[len(fpath)-1] != ']' {
+			c.err = ErrPath("path has invalid filter [brackets].")
+			break
+		}
+		seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1]))
+	}
+	return seg
+}
+
+// parseSelector parses a selector at the start of a path segment.
+func (c *compiler) parseSelector(path string) selector {
+	switch path {
+	case ".":
+		return new(selectSelf)
+	case "..":
+		return new(selectParent)
+	case "*":
+		return new(selectChildren)
+	case "":
+		return new(selectDescendants)
+	default:
+		return newSelectChildrenByTag(path)
+	}
+}
+
+// parseFilter parses a path filter contained within [brackets].
+func (c *compiler) parseFilter(path string) filter {
+	if len(path) == 0 {
+		c.err = ErrPath("path contains an empty filter expression.")
+		return nil
+	}
+
+	// Filter contains [@attr='val'], [text()='val'], or [tag='val']?
+	eqindex := strings.Index(path, "='")
+	if eqindex >= 0 {
+		rindex := nextIndex(path, "'", eqindex+2)
+		if rindex != len(path)-1 {
+			c.err = ErrPath("path has mismatched filter quotes.")
+			return nil
+		}
+		switch {
+		case path[0] == '@':
+			return newFilterAttrVal(path[1:eqindex], path[eqindex+2:rindex])
+		case strings.HasPrefix(path, "text()"):
+			return newFilterTextVal(path[eqindex+2 : rindex])
+		default:
+			return newFilterChildText(path[:eqindex], path[eqindex+2:rindex])
+		}
+	}
+
+	// Filter contains [@attr], [N], [tag] or [text()]
+	switch {
+	case path[0] == '@':
+		return newFilterAttr(path[1:])
+	case path == "text()":
+		return newFilterText()
+	case isInteger(path):
+		pos, _ := strconv.Atoi(path)
+		switch {
+		case pos > 0:
+			return newFilterPos(pos - 1)
+		default:
+			return newFilterPos(pos)
+		}
+	default:
+		return newFilterChild(path)
+	}
+}
+
+// selectSelf selects the current element into the candidate list.
+type selectSelf struct{}
+
+func (s *selectSelf) apply(e *Element, p *pather) {
+	p.candidates = append(p.candidates, e)
+}
+
+// selectParent selects the element's parent into the candidate list.
+type selectParent struct{}
+
+func (s *selectParent) apply(e *Element, p *pather) {
+	if e.parent != nil {
+		p.candidates = append(p.candidates, e.parent)
+	}
+}
+
+// selectChildren selects the element's child elements into the
+// candidate list.
+type selectChildren struct{}
+
+func (s *selectChildren) apply(e *Element, p *pather) {
+	for _, c := range e.Child {
+		if c, ok := c.(*Element); ok {
+			p.candidates = append(p.candidates, c)
+		}
+	}
+}
+
+// selectDescendants selects all descendant child elements
+// of the element into the candidate list.
+type selectDescendants struct{}
+
+func (s *selectDescendants) apply(e *Element, p *pather) {
+	var queue fifo
+	for queue.add(e); queue.len() > 0; {
+		e := queue.remove().(*Element)
+		p.candidates = append(p.candidates, e)
+		for _, c := range e.Child {
+			if c, ok := c.(*Element); ok {
+				queue.add(c)
+			}
+		}
+	}
+}
+
+// selectChildrenByTag selects into the candidate list all child
+// elements of the element having the specified tag.
+type selectChildrenByTag struct {
+	space, tag string
+}
+
+func newSelectChildrenByTag(path string) *selectChildrenByTag {
+	s, l := spaceDecompose(path)
+	return &selectChildrenByTag{s, l}
+}
+
+func (s *selectChildrenByTag) apply(e *Element, p *pather) {
+	for _, c := range e.Child {
+		if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag {
+			p.candidates = append(p.candidates, c)
+		}
+	}
+}
+
+// filterPos filters the candidate list, keeping only the
+// candidate at the specified index.
+type filterPos struct {
+	index int
+}
+
+func newFilterPos(pos int) *filterPos {
+	return &filterPos{pos}
+}
+
+func (f *filterPos) apply(p *pather) {
+	if f.index >= 0 {
+		if f.index < len(p.candidates) {
+			p.scratch = append(p.scratch, p.candidates[f.index])
+		}
+	} else {
+		if -f.index <= len(p.candidates) {
+			p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index])
+		}
+	}
+	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterAttr filters the candidate list for elements having
+// the specified attribute.
+type filterAttr struct {
+	space, key string
+}
+
+func newFilterAttr(str string) *filterAttr {
+	s, l := spaceDecompose(str)
+	return &filterAttr{s, l}
+}
+
+func (f *filterAttr) apply(p *pather) {
+	for _, c := range p.candidates {
+		for _, a := range c.Attr {
+			if spaceMatch(f.space, a.Space) && f.key == a.Key {
+				p.scratch = append(p.scratch, c)
+				break
+			}
+		}
+	}
+	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterAttrVal filters the candidate list for elements having
+// the specified attribute with the specified value.
+type filterAttrVal struct {
+	space, key, val string
+}
+
+func newFilterAttrVal(str, value string) *filterAttrVal {
+	s, l := spaceDecompose(str)
+	return &filterAttrVal{s, l, value}
+}
+
+func (f *filterAttrVal) apply(p *pather) {
+	for _, c := range p.candidates {
+		for _, a := range c.Attr {
+			if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value {
+				p.scratch = append(p.scratch, c)
+				break
+			}
+		}
+	}
+	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterText filters the candidate list for elements having text.
+type filterText struct{}
+
+func newFilterText() *filterText {
+	return &filterText{}
+}
+
+func (f *filterText) apply(p *pather) {
+	for _, c := range p.candidates {
+		if c.Text() != "" {
+			p.scratch = append(p.scratch, c)
+		}
+	}
+	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterTextVal filters the candidate list for elements having
+// text equal to the specified value.
+type filterTextVal struct {
+	val string
+}
+
+func newFilterTextVal(value string) *filterTextVal {
+	return &filterTextVal{value}
+}
+
+func (f *filterTextVal) apply(p *pather) {
+	for _, c := range p.candidates {
+		if c.Text() == f.val {
+			p.scratch = append(p.scratch, c)
+		}
+	}
+	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterChild filters the candidate list for elements having
+// a child element with the specified tag.
+type filterChild struct {
+	space, tag string
+}
+
+func newFilterChild(str string) *filterChild {
+	s, l := spaceDecompose(str)
+	return &filterChild{s, l}
+}
+
+func (f *filterChild) apply(p *pather) {
+	for _, c := range p.candidates {
+		for _, cc := range c.Child {
+			if cc, ok := cc.(*Element); ok &&
+				spaceMatch(f.space, cc.Space) &&
+				f.tag == cc.Tag {
+				p.scratch = append(p.scratch, c)
+			}
+		}
+	}
+	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
+
+// filterChildText filters the candidate list for elements having
+// a child element with the specified tag and text.
+type filterChildText struct {
+	space, tag, text string
+}
+
+func newFilterChildText(str, text string) *filterChildText {
+	s, l := spaceDecompose(str)
+	return &filterChildText{s, l, text}
+}
+
+func (f *filterChildText) apply(p *pather) {
+	for _, c := range p.candidates {
+		for _, cc := range c.Child {
+			if cc, ok := cc.(*Element); ok &&
+				spaceMatch(f.space, cc.Space) &&
+				f.tag == cc.Tag &&
+				f.text == cc.Text() {
+				p.scratch = append(p.scratch, c)
+			}
+		}
+	}
+	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
+}
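+
+// Illustrative path queries exercising the selectors and filters above. This
+// is an editor's sketch of typical usage, assuming the usual etree
+// FindElements API and XPath-style 1-based position filters:
+//
+//	doc.FindElements("./store/book")              // selectChildrenByTag
+//	doc.FindElements("./store/book[1]")           // filterPos: first match
+//	doc.FindElements("./store/book[@lang]")       // filterAttr
+//	doc.FindElements("./store/book[@lang='en']")  // filterAttrVal
+//	doc.FindElements("./store/book[title='Go']")  // filterChildText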
diff --git a/vendor/github.com/crewjam/saml/Gopkg.toml b/vendor/github.com/crewjam/saml/Gopkg.toml
new file mode 100644
index 0000000000000000000000000000000000000000..38a8fec39e7d7446b3159c3049294b70e5fc9036
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/Gopkg.toml
@@ -0,0 +1,53 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#  name = "github.com/x/y"
+#  version = "2.4.0"
+
+
+[[constraint]]
+  name = "github.com/beevik/etree"
+  version = "1.0.0"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/dchest/uniuri"
+
+[[constraint]]
+  name = "github.com/dgrijalva/jwt-go"
+  version = "3.0.0"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/kr/pretty"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/russellhaering/goxmldsig"
+
+[[constraint]]
+  name = "github.com/zenazn/goji"
+  version = "1.0.0"
+
+[[constraint]]
+  branch = "master"
+  name = "golang.org/x/crypto"
+
+[[constraint]]
+  branch = "v1"
+  name = "gopkg.in/check.v1"
diff --git a/vendor/github.com/crewjam/saml/Gopkg.lock b/vendor/github.com/crewjam/saml/Gopkg.lock
new file mode 100644
index 0000000000000000000000000000000000000000..76f5f67c0dbf6198459f88691f99ebcddc7a37d4
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/Gopkg.lock
@@ -0,0 +1,70 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/beevik/etree"
+  packages = ["."]
+  revision = "15a30b44cfd6c5a16a7ddfe271bf146aaf2d3195"
+  version = "v1.0.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/dchest/uniuri"
+  packages = ["."]
+  revision = "8902c56451e9b58ff940bbe5fec35d5f9c04584a"
+
+[[projects]]
+  name = "github.com/dgrijalva/jwt-go"
+  packages = ["."]
+  revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
+  version = "v3.0.0"
+
+[[projects]]
+  name = "github.com/jonboulle/clockwork"
+  packages = ["."]
+  revision = "2eee05ed794112d45db504eb05aa693efd2b8b09"
+  version = "v0.1.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/kr/pretty"
+  packages = ["."]
+  revision = "cfb55aafdaf3ec08f0db22699ab822c50091b1c4"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/kr/text"
+  packages = ["."]
+  revision = "7cafcd837844e784b526369c9bce262804aebc60"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/russellhaering/goxmldsig"
+  packages = [".","etreeutils","types"]
+  revision = "b7efc6231e45b10bfd779852831c8bb59b350ec5"
+
+[[projects]]
+  name = "github.com/zenazn/goji"
+  packages = [".","bind","graceful","graceful/listener","web","web/middleware","web/mutil"]
+  revision = "64eb34159fe53473206c2b3e70fe396a639452f2"
+  version = "v1.0"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/crypto"
+  packages = ["bcrypt","blowfish","ripemd160"]
+  revision = "847319b7fc94cab682988f93da778204da164588"
+
+[[projects]]
+  branch = "v1"
+  name = "gopkg.in/check.v1"
+  packages = ["."]
+  revision = "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "253ec289f823a19c6473233e4934b31f5e623b0fb3136183b129cb652a5685c2"
+  solver-name = "gps-cdcl"
+  solver-version = 1
+
diff --git a/vendor/github.com/crewjam/saml/LICENSE b/vendor/github.com/crewjam/saml/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..1235fd34ea27b0c47c65e3b3b6cd16d076475a0c
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Ross Kinder
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/crewjam/saml/NOTES.md b/vendor/github.com/crewjam/saml/NOTES.md
new file mode 100644
index 0000000000000000000000000000000000000000..33c3ac31deb26b53bbcd79269c867038a0daf604
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/NOTES.md
@@ -0,0 +1,21 @@
+* https://github.com/lestrrat/go-libxml2
+* https://github.com/onelogin/python-saml
+   - reads settings from a JSON file (yuck)
+   - BSD License (3-clause)
+* TODO: understand xml bomb (https://pypi.python.org/pypi/defusedxml)
+
+* Providers for SAML SP & IDP endpoints 
+* Methods for generating and authenticating various SAML flows
+
+
+
+
+Current working demo:
+
+term1: go run ./example/idp/idp.go -bind :8001
+term2: ngrok http 8001
+term4: ngrok http 8000
+edit example.go and fill in values for baseURL (term4) and idpMetadataURL (term2)
+term3: go run ./example/example.go -bind :8000
+term5: curl -v https://$SP.ngrok.io/saml/metadata | curl -v -H "Content-type: text/xml" --data-binary @- https://$IDP.ngrok.io/register-sp
+browser: https://$SP.ngrok.io
diff --git a/vendor/github.com/crewjam/saml/README.md b/vendor/github.com/crewjam/saml/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..cb8d7282fe708d751ff1c2239a97eaf58859e511
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/README.md
@@ -0,0 +1,162 @@
+
+## Breaking Changes 
+
+Note: between version 0.2.0 and the current master there are changes to the API
+that will break your existing code a little.
+
+One such change turned some fields from pointers to a single optional struct into
+the more correct slice of structs, and pluralized the field names. For example,
+`IDPSSODescriptor *IDPSSODescriptor` has become 
+`IDPSSODescriptors []IDPSSODescriptor`. This more accurately reflects the 
+standard.
+
+The struct `Metadata` has been renamed to `EntityDescriptor`. In 0.2.0 and before, 
+every struct derived from the standard has the same name as in the standard, 
+*except* for `Metadata` which should always have been called `EntityDescriptor`. 
+
+In various places `url.URL` is now used where `string` was used in version 0.1.0 and earlier.
+
+In various places where keys and certificates were modeled as `string` 
+in version 0.1.0 and earlier (what was I thinking?!) they are now modeled as 
+`*rsa.PrivateKey`, `*x509.Certificate`, or `crypto.PrivateKey` as appropriate.
+
+## Introduction
+
+Package saml contains a partial implementation of the SAML standard in golang.
+SAML is a standard for identity federation, i.e. either allowing a third party to authenticate your users or allowing third parties to rely on you to authenticate their users.
+
+In SAML parlance an **Identity Provider** (IDP) is a service that knows how to authenticate users. A **Service Provider** (SP) is a service that delegates authentication to an IDP. If you are building a service where users log in with someone else's credentials, then you are a **Service Provider**. This package supports implementing both service providers and identity providers.
+
+The core package contains the implementation of SAML. The package samlsp provides helper middleware suitable for use in Service Provider applications. The package samlidp provides a rudimentary IDP service that is useful for testing or as a starting point for other integrations.
+
+## Getting Started as a Service Provider
+
+Let us assume we have a simple web application to protect. We'll modify this application so it uses SAML to authenticate users.
+```golang
+package main
+
+import "net/http"
+
+func hello(w http.ResponseWriter, r *http.Request) {
+    fmt.Fprintf(w, "Hello, World!")
+}
+
+func main() {
+    app := http.HandlerFunc(hello)
+    http.Handle("/hello", app)
+    http.ListenAndServe(":8000", nil)
+}
+```
+Each service provider must have a self-signed X.509 key pair established. You can generate your own with something like this:
+
+    openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com"
+
+We will use `samlsp.Middleware` to wrap the endpoint we want to protect. Middleware provides both an `http.Handler` to serve the SAML specific URLs **and** a set of wrappers to require the user to be logged in. We also provide the URL where the service provider can fetch the metadata from the IDP at startup. In our case, we'll use [testshib.org](https://www.testshib.org/), an identity provider designed for testing.
+
+```golang
+package main
+
+import (
+	"crypto/rsa"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/crewjam/saml/samlsp"
+)
+
+func hello(w http.ResponseWriter, r *http.Request) {
+	fmt.Fprintf(w, "Hello, %s!", r.Header.Get("X-Saml-Cn"))
+}
+
+func main() {
+	keyPair, err := tls.LoadX509KeyPair("myservice.cert", "myservice.key")
+	if err != nil {
+		panic(err) // TODO handle error
+	}
+	keyPair.Leaf, err = x509.ParseCertificate(keyPair.Certificate[0])
+	if err != nil {
+		panic(err) // TODO handle error
+	}
+
+	idpMetadataURL, err := url.Parse("https://www.testshib.org/metadata/testshib-providers.xml")
+	if err != nil {
+		panic(err) // TODO handle error
+	}
+
+	rootURL, err := url.Parse("http://localhost:8000")
+	if err != nil {
+		panic(err) // TODO handle error
+	}
+
+	samlSP, _ := samlsp.New(samlsp.Options{
+		URL:            *rootURL,
+		Key:            keyPair.PrivateKey.(*rsa.PrivateKey),
+		Certificate:    keyPair.Leaf,
+		IDPMetadataURL: idpMetadataURL,
+	})
+	app := http.HandlerFunc(hello)
+	http.Handle("/hello", samlSP.RequireAccount(app))
+	http.Handle("/saml/", samlSP)
+	http.ListenAndServe(":8000", nil)
+}
+```
+
+Next we'll have to register our service provider with the identity provider to establish trust from the service provider to the IDP. For [testshib.org](https://www.testshib.org/), you can do something like:
+
+    mdpath=saml-test-$USER-$HOST.xml
+    curl localhost:8000/saml/metadata > $mdpath
+
+Navigate to https://www.testshib.org/register.html and upload the file you fetched.
+
+Now you should be able to authenticate. The flow should look like this:
+
+1. You browse to `localhost:8000/hello`
+
+2. The middleware redirects you to `https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO`
+
+3. testshib.org prompts you for a username and password.
+
+4. testshib.org returns you an HTML document which contains an HTML form set up to POST to `localhost:8000/saml/acs`. The form is automatically submitted if you have JavaScript enabled.
+
+5. The local service validates the response, issues a session cookie, and redirects you to the original URL, `localhost:8000/hello`.
+
+6. This time when `localhost:8000/hello` is requested there is a valid session and so the main content is served.
+
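+Once the session is established, the middleware exposes SAML attributes to your
+handlers as `X-Saml-*` request headers, which is how the `X-Saml-Cn` lookup in
+the example above works. A minimal sketch (the `uid` attribute here is an
+assumption; which attributes arrive depends on what the IDP releases) that you
+can drop into the program above and wrap with `samlSP.RequireAccount` just like
+`/hello`:
+
+```golang
+func whoami(w http.ResponseWriter, r *http.Request) {
+	// Header names mirror the attribute names released by the IDP.
+	fmt.Fprintf(w, "cn=%s uid=%s",
+		r.Header.Get("X-Saml-Cn"),
+		r.Header.Get("X-Saml-Uid"))
+}
+```
+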
+## Getting Started as an Identity Provider
+
+Please see `examples/idp/` for a substantially complete example of how to use the library and helpers to be an identity provider.
+
+## Support
+
+The SAML standard is huge and complex with many dark corners and strange, unused features. This package implements the most commonly used subset of these features required to provide a single sign on experience. The package supports at least the subset of SAML known as [interoperable SAML](http://saml2int.org).
+
+This package supports the **Web SSO** profile. Message flows from the service provider to the IDP are supported using the **HTTP Redirect** binding and the **HTTP POST** binding. Message flows from the IDP to the service provider are supported via the **HTTP POST** binding.
+
+The package supports signed and encrypted SAML assertions. It does not support signed or encrypted requests.
+
+## RelayState
+
+The *RelayState* parameter allows you to pass user state information across the authentication flow. The most common use for this is to allow a user to request a deep link into your site, be redirected through the SAML login flow, and upon successful completion, be directed to the originally requested link, rather than the root.
+
+Unfortunately, *RelayState* is less useful than it could be. Firstly, it is **not** authenticated, so anything you supply must be signed to avoid XSS or CSRF. Secondly, it is limited to 80 bytes in length, which precludes signing. (See section 3.6.3.1 of SAMLProfiles.)
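+
+One way to work within these constraints is to keep the requested deep link
+server-side, keyed by a short identifier, and to send only that identifier plus
+an HMAC in *RelayState*. The sketch below illustrates the idea; it is an
+editor's example, not part of this package, and the key handling is deliberately
+minimal:
+
+```golang
+package main
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"strings"
+)
+
+// signRelayState returns "<id>.<mac>". With a SHA-256 HMAC the result is about
+// 44 bytes plus the id, which stays under the 80-byte limit for short ids.
+func signRelayState(secret []byte, id string) string {
+	mac := hmac.New(sha256.New, secret)
+	mac.Write([]byte(id))
+	return id + "." + base64.RawURLEncoding.EncodeToString(mac.Sum(nil))
+}
+
+// verifyRelayState checks the MAC and returns the id, or "" if it is invalid.
+func verifyRelayState(secret []byte, relayState string) string {
+	parts := strings.SplitN(relayState, ".", 2)
+	if len(parts) != 2 {
+		return ""
+	}
+	mac := hmac.New(sha256.New, secret)
+	mac.Write([]byte(parts[0]))
+	want := base64.RawURLEncoding.EncodeToString(mac.Sum(nil))
+	if !hmac.Equal([]byte(want), []byte(parts[1])) {
+		return ""
+	}
+	return parts[0]
+}
+
+func main() {
+	secret := []byte("use-a-real-random-secret")
+	rs := signRelayState(secret, "dashboard")
+	fmt.Println(rs, verifyRelayState(secret, rs) == "dashboard")
+}
+```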
+
+## References
+
+The SAML specification is a collection of PDFs (sadly):
+
+- [SAMLCore](http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf) defines data types.
+
+- [SAMLBindings](http://docs.oasis-open.org/security/saml/v2.0/saml-bindings-2.0-os.pdf) defines the details of the HTTP requests in play.
+
+- [SAMLProfiles](http://docs.oasis-open.org/security/saml/v2.0/saml-profiles-2.0-os.pdf) describes data flows.
+
+- [SAMLConformance](http://docs.oasis-open.org/security/saml/v2.0/saml-conformance-2.0-os.pdf) includes a support matrix for various parts of the protocol.
+
+[TestShib](https://www.testshib.org/) is a testing ground for SAML service and identity providers.
+
+## Security Issues
+
+Please do not report security issues in the issue tracker. Rather, please contact me directly at ross@kndr.org ([PGP Key `8EA205C01C425FF195A5E9A43FA0768F26FD2554`](https://keybase.io/crewjam)).
diff --git a/vendor/github.com/crewjam/saml/duration.go b/vendor/github.com/crewjam/saml/duration.go
new file mode 100644
index 0000000000000000000000000000000000000000..7851d103a8bdc92cc9d20ec0a48e19486f6703a5
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/duration.go
@@ -0,0 +1,128 @@
+package saml
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Duration is a time.Duration that uses the xsd:duration format for text
+// marshalling and unmarshalling.
+type Duration time.Duration
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (d Duration) MarshalText() ([]byte, error) {
+	if d == 0 {
+		return nil, nil
+	}
+
+	out := "PT"
+	if d < 0 {
+		d *= -1
+		out = "-" + out
+	}
+
+	h := time.Duration(d) / time.Hour
+	m := time.Duration(d) % time.Hour / time.Minute
+	s := time.Duration(d) % time.Minute / time.Second
+	ns := time.Duration(d) % time.Second
+	if h > 0 {
+		out += fmt.Sprintf("%dH", h)
+	}
+	if m > 0 {
+		out += fmt.Sprintf("%dM", m)
+	}
+	if s > 0 || ns > 0 {
+		out += fmt.Sprintf("%d", s)
+		if ns > 0 {
+			out += strings.TrimRight(fmt.Sprintf(".%09d", ns), "0")
+		}
+		out += "S"
+	}
+
+	return []byte(out), nil
+}
+
+const (
+	day   = 24 * time.Hour
+	month = 30 * day  // Assumed to be 30 days.
+	year  = 365 * day // Assumed to be non-leap year.
+)
+
+var (
+	durationRegexp     = regexp.MustCompile(`^(-?)P(?:(\d+)Y)?(?:(\d+)M)?(?:(\d+)D)?(?:T(.+))?$`)
+	durationTimeRegexp = regexp.MustCompile(`^(?:(\d+)H)?(?:(\d+)M)?(?:(\d+(?:\.\d+)?)S)?$`)
+)
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (d *Duration) UnmarshalText(text []byte) error {
+	if text == nil {
+		*d = 0
+		return nil
+	}
+
+	var (
+		out  time.Duration
+		sign time.Duration = 1
+	)
+	match := durationRegexp.FindStringSubmatch(string(text))
+	if match == nil || strings.Join(match[2:6], "") == "" {
+		return fmt.Errorf("invalid duration (%s)", text)
+	}
+	if match[1] == "-" {
+		sign = -1
+	}
+	if match[2] != "" {
+		y, err := strconv.Atoi(match[2])
+		if err != nil {
+			return fmt.Errorf("invalid duration years (%s): %s", text, err)
+		}
+		out += time.Duration(y) * year
+	}
+	if match[3] != "" {
+		m, err := strconv.Atoi(match[3])
+		if err != nil {
+			return fmt.Errorf("invalid duration months (%s): %s", text, err)
+		}
+		out += time.Duration(m) * month
+	}
+	if match[4] != "" {
+		d, err := strconv.Atoi(match[4])
+		if err != nil {
+			return fmt.Errorf("invalid duration days (%s): %s", text, err)
+		}
+		out += time.Duration(d) * day
+	}
+	if match[5] != "" {
+		match := durationTimeRegexp.FindStringSubmatch(match[5])
+		if match == nil {
+			return fmt.Errorf("invalid duration (%s)", text)
+		}
+		if match[1] != "" {
+			h, err := strconv.Atoi(match[1])
+			if err != nil {
+				return fmt.Errorf("invalid duration hours (%s): %s", text, err)
+			}
+			out += time.Duration(h) * time.Hour
+		}
+		if match[2] != "" {
+			m, err := strconv.Atoi(match[2])
+			if err != nil {
+				return fmt.Errorf("invalid duration minutes (%s): %s", text, err)
+			}
+			out += time.Duration(m) * time.Minute
+		}
+		if match[3] != "" {
+			s, err := strconv.ParseFloat(match[3], 64)
+			if err != nil {
+				return fmt.Errorf("invalid duration seconds (%s): %s", text, err)
+			}
+			out += time.Duration(s * float64(time.Second))
+		}
+	}
+
+	*d = Duration(sign * out)
+	return nil
+}
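+
+// Example round trip (an editor's illustration, not part of the upstream
+// package):
+//
+//	text, _ := Duration(90 * time.Minute).MarshalText() // "PT1H30M"
+//
+//	var d Duration
+//	_ = d.UnmarshalText([]byte("P1DT12H")) // 36h, per the 24h-day assumption above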
diff --git a/vendor/github.com/crewjam/saml/identity_provider.go b/vendor/github.com/crewjam/saml/identity_provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..f76f16033ca65abdf1eac572ff06c8c57bc9483d
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/identity_provider.go
@@ -0,0 +1,889 @@
+package saml
+
+import (
+	"bytes"
+	"compress/flate"
+	"crypto"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"regexp"
+	"strconv"
+	"text/template"
+	"time"
+
+	"github.com/beevik/etree"
+	"github.com/crewjam/saml/logger"
+	"github.com/crewjam/saml/xmlenc"
+	dsig "github.com/russellhaering/goxmldsig"
+)
+
+// Session represents a user session. It is returned by the
+// SessionProvider implementation's GetSession method. Fields here
+// are used to set fields in the SAML assertion.
+type Session struct {
+	ID         string
+	CreateTime time.Time
+	ExpireTime time.Time
+	Index      string
+
+	NameID         string
+	Groups         []string
+	UserName       string
+	UserEmail      string
+	UserCommonName string
+	UserSurname    string
+	UserGivenName  string
+}
+
+// SessionProvider is an interface used by IdentityProvider to determine the
+// Session associated with a request. For an example implementation, see
+// GetSession in the samlidp package.
+type SessionProvider interface {
+	// GetSession returns the remote user session associated with the http.Request.
+	//
+	// If (and only if) the request is not associated with a session then GetSession
+	// must complete the HTTP request and return nil.
+	GetSession(w http.ResponseWriter, r *http.Request, req *IdpAuthnRequest) *Session
+}
+
+// ServiceProviderProvider is an interface used by IdentityProvider to look up
+// service provider metadata for a request.
+type ServiceProviderProvider interface {
+	// GetServiceProvider returns the Service Provider metadata for the
+	// service provider ID, which is typically the service provider's
+	// metadata URL. If an appropriate service provider cannot be found then
+	// the returned error must be os.ErrNotExist.
+	GetServiceProvider(r *http.Request, serviceProviderID string) (*EntityDescriptor, error)
+}
+
+// AssertionMaker is an interface used by IdentityProvider to construct the
+// assertion for a request. The default implementation is DefaultAssertionMaker,
+// which is used if no AssertionMaker is specified.
+type AssertionMaker interface {
+	// MakeAssertion constructs an assertion from session and the request and
+	// assigns it to req.Assertion.
+	MakeAssertion(req *IdpAuthnRequest, session *Session) error
+}
+
+// IdentityProvider implements the SAML Identity Provider role (IDP).
+//
+// An identity provider receives SAML assertion requests and responds
+// with SAML Assertions.
+//
+// You must provide a keypair that is used to
+// sign assertions.
+//
+// You must provide an implementation of ServiceProviderProvider which
+// returns the metadata for the requested service provider.
+//
+// You must provide an implementation of the SessionProvider which
+// handles the actual authentication (i.e. prompting for a username
+// and password).
+type IdentityProvider struct {
+	Key                     crypto.PrivateKey
+	Logger                  logger.Interface
+	Certificate             *x509.Certificate
+	MetadataURL             url.URL
+	SSOURL                  url.URL
+	ServiceProviderProvider ServiceProviderProvider
+	SessionProvider         SessionProvider
+	AssertionMaker          AssertionMaker
+}
+
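+// A minimal wiring sketch (an editor's illustration; the variable names are
+// assumptions, not part of this package):
+//
+//	idp := &IdentityProvider{
+//		Key:                     key,  // crypto.PrivateKey used to sign assertions
+//		Certificate:             cert, // *x509.Certificate published in the metadata
+//		Logger:                  logger.DefaultLogger,
+//		MetadataURL:             *metadataURL,
+//		SSOURL:                  *ssoURL,
+//		ServiceProviderProvider: spStore,
+//		SessionProvider:         sessions,
+//	}
+//	http.ListenAndServe(":8001", idp.Handler())
+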
+// Metadata returns the metadata structure for this identity provider.
+func (idp *IdentityProvider) Metadata() *EntityDescriptor {
+	certStr := base64.StdEncoding.EncodeToString(idp.Certificate.Raw)
+
+	return &EntityDescriptor{
+		EntityID:      idp.MetadataURL.String(),
+		ValidUntil:    TimeNow().Add(DefaultValidDuration),
+		CacheDuration: DefaultValidDuration,
+		IDPSSODescriptors: []IDPSSODescriptor{
+			IDPSSODescriptor{
+				SSODescriptor: SSODescriptor{
+					RoleDescriptor: RoleDescriptor{
+						ProtocolSupportEnumeration: "urn:oasis:names:tc:SAML:2.0:protocol",
+						KeyDescriptors: []KeyDescriptor{
+							{
+								Use: "signing",
+								KeyInfo: KeyInfo{
+									Certificate: certStr,
+								},
+							},
+							{
+								Use: "encryption",
+								KeyInfo: KeyInfo{
+									Certificate: certStr,
+								},
+								EncryptionMethods: []EncryptionMethod{
+									{Algorithm: "http://www.w3.org/2001/04/xmlenc#aes128-cbc"},
+									{Algorithm: "http://www.w3.org/2001/04/xmlenc#aes192-cbc"},
+									{Algorithm: "http://www.w3.org/2001/04/xmlenc#aes256-cbc"},
+									{Algorithm: "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"},
+								},
+							},
+						},
+					},
+					NameIDFormats: []NameIDFormat{NameIDFormat("urn:oasis:names:tc:SAML:2.0:nameid-format:transient")},
+				},
+				SingleSignOnServices: []Endpoint{
+					{
+						Binding:  HTTPRedirectBinding,
+						Location: idp.SSOURL.String(),
+					},
+					{
+						Binding:  HTTPPostBinding,
+						Location: idp.SSOURL.String(),
+					},
+				},
+			},
+		},
+	}
+}
+
+// Handler returns an http.Handler that serves the metadata and SSO
+// URLs
+func (idp *IdentityProvider) Handler() http.Handler {
+	mux := http.NewServeMux()
+	mux.HandleFunc(idp.MetadataURL.Path, idp.ServeMetadata)
+	mux.HandleFunc(idp.SSOURL.Path, idp.ServeSSO)
+	return mux
+}
+
+// ServeMetadata is an http.HandlerFunc that serves the IDP metadata
+func (idp *IdentityProvider) ServeMetadata(w http.ResponseWriter, r *http.Request) {
+	buf, _ := xml.MarshalIndent(idp.Metadata(), "", "  ")
+	w.Header().Set("Content-Type", "application/samlmetadata+xml")
+	w.Write(buf)
+}
+
+// ServeSSO handles SAML auth requests.
+//
+// When it gets a request for a user that does not have a valid session,
+// it delegates to the SessionProvider, which is responsible for prompting the
+// user to authenticate (for example, by rendering a login form).
+//
+// If the session already exists, then it produces a SAML assertion and
+// returns an HTTP response according to the specified binding. The
+// only supported binding right now is the HTTP-POST binding, which returns
+// an HTML form in the appropriate format with JavaScript to automatically
+// submit that form to the service provider's Assertion Consumer Service
+// endpoint.
+//
+// If the SAML request is invalid or cannot be verified a simple StatusBadRequest
+// response is sent.
+//
+// If the assertion cannot be created or returned, a StatusInternalServerError
+// response is sent.
+func (idp *IdentityProvider) ServeSSO(w http.ResponseWriter, r *http.Request) {
+	req, err := NewIdpAuthnRequest(idp, r)
+	if err != nil {
+		idp.Logger.Printf("failed to parse request: %s", err)
+		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
+		return
+	}
+
+	if err := req.Validate(); err != nil {
+		idp.Logger.Printf("failed to validate request: %s", err)
+		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
+		return
+	}
+
+	// TODO(ross): we must check that the request ID has not been previously
+	//   issued.
+
+	session := idp.SessionProvider.GetSession(w, r, req)
+	if session == nil {
+		return
+	}
+
+	assertionMaker := idp.AssertionMaker
+	if assertionMaker == nil {
+		assertionMaker = DefaultAssertionMaker{}
+	}
+	if err := assertionMaker.MakeAssertion(req, session); err != nil {
+		idp.Logger.Printf("failed to make assertion: %s", err)
+		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+		return
+	}
+	if err := req.WriteResponse(w); err != nil {
+		idp.Logger.Printf("failed to write response: %s", err)
+		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+		return
+	}
+}
+
+// ServeIDPInitiated handles an IDP-initiated authorization request. Requests of this
+// type require us to know a registered service provider and (optionally) the RelayState
+// that will be passed to the application.
+func (idp *IdentityProvider) ServeIDPInitiated(w http.ResponseWriter, r *http.Request, serviceProviderID string, relayState string) {
+	req := &IdpAuthnRequest{
+		IDP:         idp,
+		HTTPRequest: r,
+		RelayState:  relayState,
+	}
+
+	session := idp.SessionProvider.GetSession(w, r, req)
+	if session == nil {
+		// If GetSession returns nil, it must have written an HTTP response, per the interface
+		// (this is probably because it drew a login form or something)
+		return
+	}
+
+	var err error
+	req.ServiceProviderMetadata, err = idp.ServiceProviderProvider.GetServiceProvider(r, serviceProviderID)
+	if err == os.ErrNotExist {
+		idp.Logger.Printf("cannot find service provider: %s", serviceProviderID)
+		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+		return
+	} else if err != nil {
+		idp.Logger.Printf("cannot find service provider %s: %v", serviceProviderID, err)
+		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+		return
+	}
+
+	// find an ACS endpoint that we can use
+	for _, spssoDescriptor := range req.ServiceProviderMetadata.SPSSODescriptors {
+		for _, endpoint := range spssoDescriptor.AssertionConsumerServices {
+			if endpoint.Binding == HTTPPostBinding {
+				req.ACSEndpoint = &endpoint
+				req.SPSSODescriptor = &spssoDescriptor
+				break
+			}
+		}
+		if req.ACSEndpoint != nil {
+			break
+		}
+	}
+	if req.ACSEndpoint == nil {
+		idp.Logger.Printf("saml metadata does not contain an Assertion Consumer Service URL")
+		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+		return
+	}
+
+	assertionMaker := idp.AssertionMaker
+	if assertionMaker == nil {
+		assertionMaker = DefaultAssertionMaker{}
+	}
+	if err := assertionMaker.MakeAssertion(req, session); err != nil {
+		idp.Logger.Printf("failed to make assertion: %s", err)
+		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+		return
+	}
+
+	if err := req.WriteResponse(w); err != nil {
+		idp.Logger.Printf("failed to write response: %s", err)
+		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+		return
+	}
+}
+
+// IdpAuthnRequest is used by IdentityProvider to handle a single authentication request.
+type IdpAuthnRequest struct {
+	IDP                     *IdentityProvider
+	HTTPRequest             *http.Request
+	RelayState              string
+	RequestBuffer           []byte
+	Request                 AuthnRequest
+	ServiceProviderMetadata *EntityDescriptor
+	SPSSODescriptor         *SPSSODescriptor
+	ACSEndpoint             *IndexedEndpoint
+	Assertion               *Assertion
+	AssertionEl             *etree.Element
+	ResponseEl              *etree.Element
+}
+
+// NewIdpAuthnRequest returns a new IdpAuthnRequest for the given HTTP request to the authorization
+// service.
+func NewIdpAuthnRequest(idp *IdentityProvider, r *http.Request) (*IdpAuthnRequest, error) {
+	req := &IdpAuthnRequest{
+		IDP:         idp,
+		HTTPRequest: r,
+	}
+
+	switch r.Method {
+	case "GET":
+		compressedRequest, err := base64.StdEncoding.DecodeString(r.URL.Query().Get("SAMLRequest"))
+		if err != nil {
+			return nil, fmt.Errorf("cannot decode request: %s", err)
+		}
+		req.RequestBuffer, err = ioutil.ReadAll(flate.NewReader(bytes.NewReader(compressedRequest)))
+		if err != nil {
+			return nil, fmt.Errorf("cannot decompress request: %s", err)
+		}
+		req.RelayState = r.URL.Query().Get("RelayState")
+	case "POST":
+		if err := r.ParseForm(); err != nil {
+			return nil, err
+		}
+		var err error
+		req.RequestBuffer, err = base64.StdEncoding.DecodeString(r.PostForm.Get("SAMLRequest"))
+		if err != nil {
+			return nil, err
+		}
+		req.RelayState = r.PostForm.Get("RelayState")
+	default:
+		return nil, fmt.Errorf("method not allowed")
+	}
+	return req, nil
+}
+
+// Validate checks that the authentication request is valid and assigns
+// the AuthnRequest and Metadata properties. Returns a non-nil error if the
+// request is not valid.
+func (req *IdpAuthnRequest) Validate() error {
+	if err := xml.Unmarshal(req.RequestBuffer, &req.Request); err != nil {
+		return err
+	}
+
+	// We always have exactly one IDP SSO descriptor
+	if len(req.IDP.Metadata().IDPSSODescriptors) != 1 {
+		panic("expected exactly one IDP SSO descriptor in IDP metadata")
+	}
+	idpSsoDescriptor := req.IDP.Metadata().IDPSSODescriptors[0]
+
+	// TODO(ross): support signed authn requests
+	// For now we do the safe thing and fail in the case where we think
+	// requests might be signed.
+	if idpSsoDescriptor.WantAuthnRequestsSigned != nil && *idpSsoDescriptor.WantAuthnRequestsSigned {
+		return fmt.Errorf("Authn request signature checking is not currently supported")
+	}
+
+	// In http://docs.oasis-open.org/security/saml/v2.0/saml-bindings-2.0-os.pdf §3.4.5.2
+	// we get a description of the Destination attribute:
+	//
+	//   If the message is signed, the Destination XML attribute in the root SAML
+	//   element of the protocol message MUST contain the URL to which the sender
+	//   has instructed the user agent to deliver the message. The recipient MUST
+	//   then verify that the value matches the location at which the message has
+	//   been received.
+	//
+	// We require the destination be correct either (a) if signing is enabled or
+	// (b) if it was provided.
+	mustHaveDestination := idpSsoDescriptor.WantAuthnRequestsSigned != nil && *idpSsoDescriptor.WantAuthnRequestsSigned
+	mustHaveDestination = mustHaveDestination || req.Request.Destination != ""
+	if mustHaveDestination {
+		if req.Request.Destination != req.IDP.SSOURL.String() {
+			return fmt.Errorf("expected destination to be %q, not %q", req.IDP.SSOURL.String(), req.Request.Destination)
+		}
+	}
+
+	if req.Request.IssueInstant.Add(MaxIssueDelay).Before(TimeNow()) {
+		return fmt.Errorf("request expired at %s",
+			req.Request.IssueInstant.Add(MaxIssueDelay))
+	}
+	if req.Request.Version != "2.0" {
+		return fmt.Errorf("expected SAML request version 2.0 got %v", req.Request.Version)
+	}
+
+	// find the service provider
+	serviceProviderID := req.Request.Issuer.Value
+	serviceProvider, err := req.IDP.ServiceProviderProvider.GetServiceProvider(req.HTTPRequest, serviceProviderID)
+	if err == os.ErrNotExist {
+		return fmt.Errorf("cannot handle request from unknown service provider %s", serviceProviderID)
+	} else if err != nil {
+		return fmt.Errorf("cannot find service provider %s: %v", serviceProviderID, err)
+	}
+	req.ServiceProviderMetadata = serviceProvider
+
+	// Check that the ACS URL matches an ACS endpoint in the SP metadata.
+	if err := req.getACSEndpoint(); err != nil {
+		return fmt.Errorf("cannot find assertion consumer service: %v", err)
+	}
+
+	return nil
+}
+
+func (req *IdpAuthnRequest) getACSEndpoint() error {
+	if req.Request.AssertionConsumerServiceIndex != "" {
+		for _, spssoDescriptor := range req.ServiceProviderMetadata.SPSSODescriptors {
+			for _, spAssertionConsumerService := range spssoDescriptor.AssertionConsumerServices {
+				if strconv.Itoa(spAssertionConsumerService.Index) == req.Request.AssertionConsumerServiceIndex {
+					req.SPSSODescriptor = &spssoDescriptor
+					req.ACSEndpoint = &spAssertionConsumerService
+					return nil
+				}
+			}
+		}
+	}
+
+	if req.Request.AssertionConsumerServiceURL != "" {
+		for _, spssoDescriptor := range req.ServiceProviderMetadata.SPSSODescriptors {
+			for _, spAssertionConsumerService := range spssoDescriptor.AssertionConsumerServices {
+				if spAssertionConsumerService.Location == req.Request.AssertionConsumerServiceURL {
+					req.SPSSODescriptor = &spssoDescriptor
+					req.ACSEndpoint = &spAssertionConsumerService
+					return nil
+				}
+			}
+		}
+	}
+
+	return os.ErrNotExist // no ACS url found or specified
+}
+
+// DefaultAssertionMaker produces a SAML assertion for the
+// given request and assigns it to req.Assertion.
+type DefaultAssertionMaker struct {
+}
+
+// MakeAssertion implements AssertionMaker. It produces a SAML assertion from the
+// given request and assigns it to req.Assertion.
+func (DefaultAssertionMaker) MakeAssertion(req *IdpAuthnRequest, session *Session) error {
+	attributes := []Attribute{}
+
+	var attributeConsumingService *AttributeConsumingService
+	for _, acs := range req.SPSSODescriptor.AttributeConsumingServices {
+		if acs.IsDefault != nil && *acs.IsDefault {
+			attributeConsumingService = &acs
+			break
+		}
+	}
+	if attributeConsumingService == nil {
+		for _, acs := range req.SPSSODescriptor.AttributeConsumingServices {
+			attributeConsumingService = &acs
+			break
+		}
+	}
+	if attributeConsumingService == nil {
+		attributeConsumingService = &AttributeConsumingService{}
+	}
+
+	for _, requestedAttribute := range attributeConsumingService.RequestedAttributes {
+		if requestedAttribute.NameFormat == "urn:oasis:names:tc:SAML:2.0:attrname-format:basic" || requestedAttribute.NameFormat == "urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified" {
+			attrName := requestedAttribute.Name
+			attrName = regexp.MustCompile("[^A-Za-z0-9]+").ReplaceAllString(attrName, "")
+			switch attrName {
+			case "email", "emailaddress":
+				attributes = append(attributes, Attribute{
+					FriendlyName: requestedAttribute.FriendlyName,
+					Name:         requestedAttribute.Name,
+					NameFormat:   requestedAttribute.NameFormat,
+					Values: []AttributeValue{{
+						Type:  "xs:string",
+						Value: session.UserEmail,
+					}},
+				})
+			case "name", "fullname", "cn", "commonname":
+				attributes = append(attributes, Attribute{
+					FriendlyName: requestedAttribute.FriendlyName,
+					Name:         requestedAttribute.Name,
+					NameFormat:   requestedAttribute.NameFormat,
+					Values: []AttributeValue{{
+						Type:  "xs:string",
+						Value: session.UserCommonName,
+					}},
+				})
+			case "givenname", "firstname":
+				attributes = append(attributes, Attribute{
+					FriendlyName: requestedAttribute.FriendlyName,
+					Name:         requestedAttribute.Name,
+					NameFormat:   requestedAttribute.NameFormat,
+					Values: []AttributeValue{{
+						Type:  "xs:string",
+						Value: session.UserGivenName,
+					}},
+				})
+			case "surname", "lastname", "familyname":
+				attributes = append(attributes, Attribute{
+					FriendlyName: requestedAttribute.FriendlyName,
+					Name:         requestedAttribute.Name,
+					NameFormat:   requestedAttribute.NameFormat,
+					Values: []AttributeValue{{
+						Type:  "xs:string",
+						Value: session.UserSurname,
+					}},
+				})
+			case "uid", "user", "userid":
+				attributes = append(attributes, Attribute{
+					FriendlyName: requestedAttribute.FriendlyName,
+					Name:         requestedAttribute.Name,
+					NameFormat:   requestedAttribute.NameFormat,
+					Values: []AttributeValue{{
+						Type:  "xs:string",
+						Value: session.UserName,
+					}},
+				})
+			}
+		}
+	}
+
+	if session.UserName != "" {
+		attributes = append(attributes, Attribute{
+			FriendlyName: "uid",
+			Name:         "urn:oid:0.9.2342.19200300.100.1.1",
+			NameFormat:   "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
+			Values: []AttributeValue{{
+				Type:  "xs:string",
+				Value: session.UserName,
+			}},
+		})
+	}
+
+	if session.UserEmail != "" {
+		attributes = append(attributes, Attribute{
+			FriendlyName: "eduPersonPrincipalName",
+			Name:         "urn:oid:1.3.6.1.4.1.5923.1.1.1.6",
+			NameFormat:   "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
+			Values: []AttributeValue{{
+				Type:  "xs:string",
+				Value: session.UserEmail,
+			}},
+		})
+	}
+	if session.UserSurname != "" {
+		attributes = append(attributes, Attribute{
+			FriendlyName: "sn",
+			Name:         "urn:oid:2.5.4.4",
+			NameFormat:   "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
+			Values: []AttributeValue{{
+				Type:  "xs:string",
+				Value: session.UserSurname,
+			}},
+		})
+	}
+	if session.UserGivenName != "" {
+		attributes = append(attributes, Attribute{
+			FriendlyName: "givenName",
+			Name:         "urn:oid:2.5.4.42",
+			NameFormat:   "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
+			Values: []AttributeValue{{
+				Type:  "xs:string",
+				Value: session.UserGivenName,
+			}},
+		})
+	}
+
+	if session.UserCommonName != "" {
+		attributes = append(attributes, Attribute{
+			FriendlyName: "cn",
+			Name:         "urn:oid:2.5.4.3",
+			NameFormat:   "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
+			Values: []AttributeValue{{
+				Type:  "xs:string",
+				Value: session.UserCommonName,
+			}},
+		})
+	}
+
+	if len(session.Groups) != 0 {
+		groupMemberAttributeValues := []AttributeValue{}
+		for _, group := range session.Groups {
+			groupMemberAttributeValues = append(groupMemberAttributeValues, AttributeValue{
+				Type:  "xs:string",
+				Value: group,
+			})
+		}
+		attributes = append(attributes, Attribute{
+			FriendlyName: "eduPersonAffiliation",
+			Name:         "urn:oid:1.3.6.1.4.1.5923.1.1.1.1",
+			NameFormat:   "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
+			Values:       groupMemberAttributeValues,
+		})
+	}
+
+	// allow for some clock skew in the validity period using the
+	// issuer's apparent clock.
+	notBefore := TimeNow().Add(-1 * MaxClockSkew)
+	notOnOrAfterAfter := notBefore.Add(MaxClockSkew).Add(MaxIssueDelay)
+	if notBefore.Before(req.Request.IssueInstant) {
+		notBefore = req.Request.IssueInstant
+		notOnOrAfterAfter = notBefore.Add(MaxIssueDelay)
+	}
+
+	req.Assertion = &Assertion{
+		ID:           fmt.Sprintf("id-%x", randomBytes(20)),
+		IssueInstant: TimeNow(),
+		Version:      "2.0",
+		Issuer: Issuer{
+			Format: "urn:oasis:names:tc:SAML:2.0:nameid-format:entity",
+			Value:  req.IDP.Metadata().EntityID,
+		},
+		Subject: &Subject{
+			NameID: &NameID{
+				Format:          "urn:oasis:names:tc:SAML:2.0:nameid-format:transient",
+				NameQualifier:   req.IDP.Metadata().EntityID,
+				SPNameQualifier: req.ServiceProviderMetadata.EntityID,
+				Value:           session.NameID,
+			},
+			SubjectConfirmations: []SubjectConfirmation{
+				SubjectConfirmation{
+					Method: "urn:oasis:names:tc:SAML:2.0:cm:bearer",
+					SubjectConfirmationData: &SubjectConfirmationData{
+						Address:      req.HTTPRequest.RemoteAddr,
+						InResponseTo: req.Request.ID,
+						NotOnOrAfter: TimeNow().Add(MaxIssueDelay),
+						Recipient:    req.ACSEndpoint.Location,
+					},
+				},
+			},
+		},
+		Conditions: &Conditions{
+			NotBefore:    notBefore,
+			NotOnOrAfter: notOnOrAfterAfter,
+			AudienceRestrictions: []AudienceRestriction{
+				AudienceRestriction{
+					Audience: Audience{Value: req.ServiceProviderMetadata.EntityID},
+				},
+			},
+		},
+		AuthnStatements: []AuthnStatement{
+			AuthnStatement{
+				AuthnInstant: session.CreateTime,
+				SessionIndex: session.Index,
+				SubjectLocality: &SubjectLocality{
+					Address: req.HTTPRequest.RemoteAddr,
+				},
+				AuthnContext: AuthnContext{
+					AuthnContextClassRef: &AuthnContextClassRef{
+						Value: "urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport",
+					},
+				},
+			},
+		},
+		AttributeStatements: []AttributeStatement{
+			AttributeStatement{
+				Attributes: attributes,
+			},
+		},
+	}
+
+	return nil
+}
+
+// The Canonicalizer prefix list MUST be empty. Various implementations
+// (maybe ours?) do not appear to support non-empty prefix lists in XML C14N.
+const canonicalizerPrefixList = ""
+
+// MakeAssertionEl sets `AssertionEl` to a signed, possibly encrypted, version of `Assertion`.
+func (req *IdpAuthnRequest) MakeAssertionEl() error {
+	keyPair := tls.Certificate{
+		Certificate: [][]byte{req.IDP.Certificate.Raw},
+		PrivateKey:  req.IDP.Key,
+		Leaf:        req.IDP.Certificate,
+	}
+	keyStore := dsig.TLSCertKeyStore(keyPair)
+
+	signingContext := dsig.NewDefaultSigningContext(keyStore)
+	signingContext.Canonicalizer = dsig.MakeC14N10ExclusiveCanonicalizerWithPrefixList(canonicalizerPrefixList)
+	if err := signingContext.SetSignatureMethod(dsig.RSASHA1SignatureMethod); err != nil {
+		return err
+	}
+
+	assertionEl := req.Assertion.Element()
+
+	signedAssertionEl, err := signingContext.SignEnveloped(assertionEl)
+	if err != nil {
+		return err
+	}
+
+	sigEl := signedAssertionEl.Child[len(signedAssertionEl.Child)-1]
+	req.Assertion.Signature = sigEl.(*etree.Element)
+	signedAssertionEl = req.Assertion.Element()
+
+	certBuf, err := req.getSPEncryptionCert()
+	if err == os.ErrNotExist {
+		req.AssertionEl = signedAssertionEl
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	var signedAssertionBuf []byte
+	{
+		doc := etree.NewDocument()
+		doc.SetRoot(signedAssertionEl)
+		signedAssertionBuf, err = doc.WriteToBytes()
+		if err != nil {
+			return err
+		}
+	}
+
+	encryptor := xmlenc.OAEP()
+	encryptor.BlockCipher = xmlenc.AES128CBC
+	encryptor.DigestMethod = &xmlenc.SHA1
+	encryptedDataEl, err := encryptor.Encrypt(certBuf, signedAssertionBuf)
+	if err != nil {
+		return err
+	}
+	encryptedDataEl.CreateAttr("Type", "http://www.w3.org/2001/04/xmlenc#Element")
+
+	encryptedAssertionEl := etree.NewElement("saml:EncryptedAssertion")
+	encryptedAssertionEl.AddChild(encryptedDataEl)
+	req.AssertionEl = encryptedAssertionEl
+
+	return nil
+}
+
+// WriteResponse writes the `Response` to the http.ResponseWriter. If
+// `Response` is not already set, it calls MakeResponse to produce it.
+func (req *IdpAuthnRequest) WriteResponse(w http.ResponseWriter) error {
+	if req.ResponseEl == nil {
+		if err := req.MakeResponse(); err != nil {
+			return err
+		}
+	}
+
+	doc := etree.NewDocument()
+	doc.SetRoot(req.ResponseEl)
+	responseBuf, err := doc.WriteToBytes()
+	if err != nil {
+		return err
+	}
+
+	// the only supported binding is the HTTP-POST binding
+	switch req.ACSEndpoint.Binding {
+	case HTTPPostBinding:
+		tmpl := template.Must(template.New("saml-post-form").Parse(`<html>` +
+			`<form method="post" action="{{.URL}}" id="SAMLResponseForm">` +
+			`<input type="hidden" name="SAMLResponse" value="{{.SAMLResponse}}" />` +
+			`<input type="hidden" name="RelayState" value="{{.RelayState}}" />` +
+			`<input id="SAMLSubmitButton" type="submit" value="Continue" />` +
+			`</form>` +
+			`<script>document.getElementById('SAMLSubmitButton').style.visibility='hidden';</script>` +
+			`<script>document.getElementById('SAMLResponseForm').submit();</script>` +
+			`</html>`))
+		data := struct {
+			URL          string
+			SAMLResponse string
+			RelayState   string
+		}{
+			URL:          req.ACSEndpoint.Location,
+			SAMLResponse: base64.StdEncoding.EncodeToString(responseBuf),
+			RelayState:   req.RelayState,
+		}
+
+		buf := bytes.NewBuffer(nil)
+		if err := tmpl.Execute(buf, data); err != nil {
+			return err
+		}
+		if _, err := io.Copy(w, buf); err != nil {
+			return err
+		}
+		return nil
+
+	default:
+		return fmt.Errorf("%s: unsupported binding %s",
+			req.ServiceProviderMetadata.EntityID,
+			req.ACSEndpoint.Binding)
+	}
+}
+
+// getSPEncryptionCert returns the certificate which we can use to encrypt
+// assertions for the SP, or os.ErrNotExist if no suitable certificate is
+// found in the SP metadata.
+func (req *IdpAuthnRequest) getSPEncryptionCert() (*x509.Certificate, error) {
+	certStr := ""
+	for _, keyDescriptor := range req.SPSSODescriptor.KeyDescriptors {
+		if keyDescriptor.Use == "encryption" {
+			certStr = keyDescriptor.KeyInfo.Certificate
+			break
+		}
+	}
+
+	// If there are no certs explicitly labeled for encryption, return the first
+	// non-empty cert we find.
+	if certStr == "" {
+		for _, keyDescriptor := range req.SPSSODescriptor.KeyDescriptors {
+			if keyDescriptor.Use == "" && keyDescriptor.KeyInfo.Certificate != "" {
+				certStr = keyDescriptor.KeyInfo.Certificate
+				break
+			}
+		}
+	}
+
+	if certStr == "" {
+		return nil, os.ErrNotExist
+	}
+
+	// cleanup whitespace and re-encode a PEM
+	certStr = regexp.MustCompile(`\s+`).ReplaceAllString(certStr, "")
+	certBytes, err := base64.StdEncoding.DecodeString(certStr)
+	if err != nil {
+		return nil, fmt.Errorf("cannot decode certificate base64: %v", err)
+	}
+	cert, err := x509.ParseCertificate(certBytes)
+	if err != nil {
+		return nil, fmt.Errorf("cannot parse certificate: %v", err)
+	}
+	return cert, nil
+}
+
+// unmarshalEtreeHack parses `el` and sets values in the structure `v`.
+//
+// This is a hack -- it first serializes the element, then uses xml.Unmarshal.
+func unmarshalEtreeHack(el *etree.Element, v interface{}) error {
+	doc := etree.NewDocument()
+	doc.SetRoot(el)
+	buf, err := doc.WriteToBytes()
+	if err != nil {
+		return err
+	}
+	return xml.Unmarshal(buf, v)
+}
+
+// MakeResponse creates and assigns a new SAML response in ResponseEl. `Assertion` must
+// be non-nil. If MakeAssertionEl() has not been called, this function calls it for
+// you.
+func (req *IdpAuthnRequest) MakeResponse() error {
+	if req.AssertionEl == nil {
+		if err := req.MakeAssertionEl(); err != nil {
+			return err
+		}
+	}
+
+	response := &Response{
+		Destination:  req.ACSEndpoint.Location,
+		ID:           fmt.Sprintf("id-%x", randomBytes(20)),
+		InResponseTo: req.Request.ID,
+		IssueInstant: TimeNow(),
+		Version:      "2.0",
+		Issuer: &Issuer{
+			Format: "urn:oasis:names:tc:SAML:2.0:nameid-format:entity",
+			Value:  req.IDP.MetadataURL.String(),
+		},
+		Status: Status{
+			StatusCode: StatusCode{
+				Value: StatusSuccess,
+			},
+		},
+	}
+
+	responseEl := response.Element()
+	responseEl.AddChild(req.AssertionEl) // AssertionEl either an EncryptedAssertion or Assertion element
+
+	// Sign the response element (we've already signed the Assertion element)
+	{
+		keyPair := tls.Certificate{
+			Certificate: [][]byte{req.IDP.Certificate.Raw},
+			PrivateKey:  req.IDP.Key,
+			Leaf:        req.IDP.Certificate,
+		}
+		keyStore := dsig.TLSCertKeyStore(keyPair)
+
+		signingContext := dsig.NewDefaultSigningContext(keyStore)
+		signingContext.Canonicalizer = dsig.MakeC14N10ExclusiveCanonicalizerWithPrefixList(canonicalizerPrefixList)
+		if err := signingContext.SetSignatureMethod(dsig.RSASHA1SignatureMethod); err != nil {
+			return err
+		}
+
+		signedResponseEl, err := signingContext.SignEnveloped(responseEl)
+		if err != nil {
+			return err
+		}
+
+		sigEl := signedResponseEl.ChildElements()[len(signedResponseEl.ChildElements())-1]
+		response.Signature = sigEl
+		responseEl = response.Element()
+		responseEl.AddChild(req.AssertionEl)
+	}
+
+	req.ResponseEl = responseEl
+	return nil
+}
diff --git a/vendor/github.com/crewjam/saml/logger/logger.go b/vendor/github.com/crewjam/saml/logger/logger.go
new file mode 100644
index 0000000000000000000000000000000000000000..c211aba60fafef6d006a7b6e821416656e6a2b3a
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/logger/logger.go
@@ -0,0 +1,31 @@
+package logger
+
+import (
+	"log"
+	"os"
+)
+
+// Interface provides the minimal logging interface
+type Interface interface {
+	// Printf prints to the logger using the format.
+	Printf(format string, v ...interface{})
+	// Print prints to the logger.
+	Print(v ...interface{})
+	// Println prints to the logger, followed by a newline.
+	Println(v ...interface{})
+	// Fatal is equivalent to Print() followed by a call to os.Exit(1).
+	Fatal(v ...interface{})
+	// Fatalf is equivalent to Printf() followed by a call to os.Exit(1).
+	Fatalf(format string, v ...interface{})
+	// Fatalln is equivalent to Println() followed by a call to os.Exit(1).
+	Fatalln(v ...interface{})
+	// Panic is equivalent to Print() followed by a call to panic().
+	Panic(v ...interface{})
+	// Panicf is equivalent to Printf() followed by a call to panic().
+	Panicf(format string, v ...interface{})
+	// Panicln is equivalent to Println() followed by a call to panic().
+	Panicln(v ...interface{})
+}
+
+// DefaultLogger logs messages to os.Stdout
+var DefaultLogger = log.New(os.Stdout, "", log.LstdFlags)
diff --git a/vendor/github.com/crewjam/saml/metadata.go b/vendor/github.com/crewjam/saml/metadata.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b39a86d91ebdf9bae54ad3ad5f9ee62711e8bed
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/metadata.go
@@ -0,0 +1,286 @@
+package saml
+
+import (
+	"encoding/xml"
+	"time"
+
+	"github.com/beevik/etree"
+)
+
+// HTTPPostBinding is the official URN for the HTTP-POST binding (transport)
+var HTTPPostBinding = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
+
+// HTTPRedirectBinding is the official URN for the HTTP-Redirect binding (transport)
+var HTTPRedirectBinding = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect"
+
+// EntitiesDescriptor represents the SAML object of the same name.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.3.1
+type EntitiesDescriptor struct {
+	XMLName             xml.Name       `xml:"urn:oasis:names:tc:SAML:2.0:metadata EntitiesDescriptor"`
+	ID                  *string        `xml:",attr,omitempty"`
+	ValidUntil          *time.Time     `xml:"validUntil,attr,omitempty"`
+	CacheDuration       *time.Duration `xml:"cacheDuration,attr,omitempty"`
+	Name                *string        `xml:",attr,omitempty"`
+	Signature           *etree.Element
+	EntitiesDescriptors []EntitiesDescriptor `xml:"urn:oasis:names:tc:SAML:2.0:metadata EntitiesDescriptor"`
+	EntityDescriptors   []EntityDescriptor   `xml:"urn:oasis:names:tc:SAML:2.0:metadata EntityDescriptor"`
+}
+
+// Metadata has been renamed to EntityDescriptor.
+//
+// This change was made to be consistent with the rest of the API which uses names
+// from the SAML specification for types.
+//
+// This is a tombstone to help you discover this fact. You should update references
+// to saml.Metadata to be saml.EntityDescriptor.
+var Metadata = struct{}{}
+
+// EntityDescriptor represents the SAML EntityDescriptor object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.3.2
+type EntityDescriptor struct {
+	XMLName                       xml.Name      `xml:"urn:oasis:names:tc:SAML:2.0:metadata EntityDescriptor"`
+	EntityID                      string        `xml:"entityID,attr"`
+	ID                            string        `xml:",attr,omitempty"`
+	ValidUntil                    time.Time     `xml:"validUntil,attr,omitempty"`
+	CacheDuration                 time.Duration `xml:"cacheDuration,attr,omitempty"`
+	Signature                     *etree.Element
+	RoleDescriptors               []RoleDescriptor               `xml:"RoleDescriptor"`
+	IDPSSODescriptors             []IDPSSODescriptor             `xml:"IDPSSODescriptor"`
+	SPSSODescriptors              []SPSSODescriptor              `xml:"SPSSODescriptor"`
+	AuthnAuthorityDescriptors     []AuthnAuthorityDescriptor     `xml:"AuthnAuthorityDescriptor"`
+	AttributeAuthorityDescriptors []AttributeAuthorityDescriptor `xml:"AttributeAuthorityDescriptor"`
+	PDPDescriptors                []PDPDescriptor                `xml:"PDPDescriptor"`
+	AffiliationDescriptor         *AffiliationDescriptor
+	Organization                  *Organization
+	ContactPerson                 *ContactPerson
+	AdditionalMetadataLocations   []string `xml:"AdditionalMetadataLocation"`
+}
+
+// MarshalXML implements xml.Marshaler
+func (m EntityDescriptor) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type Alias EntityDescriptor
+	aux := &struct {
+		ValidUntil    RelaxedTime `xml:"validUntil,attr,omitempty"`
+		CacheDuration Duration    `xml:"cacheDuration,attr,omitempty"`
+		*Alias
+	}{
+		ValidUntil:    RelaxedTime(m.ValidUntil),
+		CacheDuration: Duration(m.CacheDuration),
+		Alias:         (*Alias)(&m),
+	}
+	return e.Encode(aux)
+}
+
+// UnmarshalXML implements xml.Unmarshaler
+func (m *EntityDescriptor) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type Alias EntityDescriptor
+	aux := &struct {
+		ValidUntil    RelaxedTime `xml:"validUntil,attr,omitempty"`
+		CacheDuration Duration    `xml:"cacheDuration,attr,omitempty"`
+		*Alias
+	}{
+		Alias: (*Alias)(m),
+	}
+	if err := d.DecodeElement(aux, &start); err != nil {
+		return err
+	}
+	m.ValidUntil = time.Time(aux.ValidUntil)
+	m.CacheDuration = time.Duration(aux.CacheDuration)
+	return nil
+}
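+
+// Decoding IDP metadata fetched elsewhere is a plain xml.Unmarshal (an
+// editor's sketch):
+//
+//	var md EntityDescriptor
+//	if err := xml.Unmarshal(rawMetadata, &md); err != nil {
+//		// handle error
+//	}
+//	_ = md.EntityID // typically the IDP's metadata URL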
+
+// Organization represents the SAML Organization object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.3.2.1
+type Organization struct {
+	OrganizationNames        []LocalizedName `xml:"OrganizationName"`
+	OrganizationDisplayNames []LocalizedName `xml:"OrganizationDisplayName"`
+	OrganizationURLs         []LocalizedURI  `xml:"OrganizationURL"`
+}
+
+// LocalizedName represents the SAML type localizedNameType.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.2.4
+type LocalizedName struct {
+	Lang  string `xml:"xml lang,attr"`
+	Value string `xml:",chardata"`
+}
+
+// LocalizedURI represents the SAML type localizedURIType.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.2.5
+type LocalizedURI struct {
+	Lang  string `xml:"xml lang,attr"`
+	Value string `xml:",chardata"`
+}
+
+// ContactPerson represents the SAML element ContactPerson.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.3.2.2
+type ContactPerson struct {
+	ContactType      string `xml:"contactType,attr"`
+	Company          string
+	GivenName        string
+	SurName          string
+	EmailAddresses   []string `xml:"EmailAddress"`
+	TelephoneNumbers []string `xml:"TelephoneNumber"`
+}
+
+// RoleDescriptor represents the SAML element RoleDescriptor.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.4.1
+type RoleDescriptor struct {
+	ID                         string        `xml:",attr,omitempty"`
+	ValidUntil                 time.Time     `xml:"validUntil,attr,omitempty"`
+	CacheDuration              time.Duration `xml:"cacheDuration,attr,omitempty"`
+	ProtocolSupportEnumeration string        `xml:"protocolSupportEnumeration,attr"`
+	ErrorURL                   string        `xml:"errorURL,attr,omitempty"`
+	Signature                  *etree.Element
+	KeyDescriptors             []KeyDescriptor `xml:"KeyDescriptor,omitempty"`
+	Organization               *Organization   `xml:"Organization,omitempty"`
+	ContactPeople              []ContactPerson `xml:"ContactPerson,omitempty"`
+}
+
+// KeyDescriptor represents the XMLSEC object of the same name
+type KeyDescriptor struct {
+	Use               string             `xml:"use,attr"`
+	KeyInfo           KeyInfo            `xml:"http://www.w3.org/2000/09/xmldsig# KeyInfo"`
+	EncryptionMethods []EncryptionMethod `xml:"EncryptionMethod"`
+}
+
+// EncryptionMethod represents the XMLSEC object of the same name
+type EncryptionMethod struct {
+	Algorithm string `xml:"Algorithm,attr"`
+}
+
+// KeyInfo represents the XMLSEC object of the same name
+//
+// TODO(ross): revisit xmldsig and make this type more complete
+type KeyInfo struct {
+	XMLName     xml.Name `xml:"http://www.w3.org/2000/09/xmldsig# KeyInfo"`
+	Certificate string   `xml:"X509Data>X509Certificate"`
+}
+
+// Endpoint represents the SAML EndpointType object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.2.2
+type Endpoint struct {
+	Binding          string `xml:"Binding,attr"`
+	Location         string `xml:"Location,attr"`
+	ResponseLocation string `xml:"ResponseLocation,attr,omitempty"`
+}
+
+// IndexedEndpoint represents the SAML IndexedEndpointType object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.2.3
+type IndexedEndpoint struct {
+	Binding          string  `xml:"Binding,attr"`
+	Location         string  `xml:"Location,attr"`
+	ResponseLocation *string `xml:"ResponseLocation,attr,omitempty"`
+	Index            int     `xml:"index,attr"`
+	IsDefault        *bool   `xml:"isDefault,attr"`
+}
+
+// SSODescriptor represents the SAML complex type SSODescriptor
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.4.2
+type SSODescriptor struct {
+	RoleDescriptor
+	ArtifactResolutionServices []IndexedEndpoint `xml:"ArtifactResolutionService"`
+	SingleLogoutServices       []Endpoint        `xml:"SingleLogoutService"`
+	ManageNameIDServices       []Endpoint        `xml:"ManageNameIDService"`
+	NameIDFormats              []NameIDFormat    `xml:"NameIDFormat"`
+}
+
+// IDPSSODescriptor represents the SAML IDPSSODescriptorType object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.4.3
+type IDPSSODescriptor struct {
+	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:metadata IDPSSODescriptor"`
+	SSODescriptor
+	WantAuthnRequestsSigned *bool `xml:",attr"`
+
+	SingleSignOnServices       []Endpoint  `xml:"SingleSignOnService"`
+	NameIDMappingServices      []Endpoint  `xml:"NameIDMappingService"`
+	AssertionIDRequestServices []Endpoint  `xml:"AssertionIDRequestService"`
+	AttributeProfiles          []string    `xml:"AttributeProfile"`
+	Attributes                 []Attribute `xml:"Attribute"`
+}
+
+// SPSSODescriptor represents the SAML SPSSODescriptorType object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.4.2
+type SPSSODescriptor struct {
+	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:metadata SPSSODescriptor"`
+	SSODescriptor
+	AuthnRequestsSigned        *bool                       `xml:",attr"`
+	WantAssertionsSigned       *bool                       `xml:",attr"`
+	AssertionConsumerServices  []IndexedEndpoint           `xml:"AssertionConsumerService"`
+	AttributeConsumingServices []AttributeConsumingService `xml:"AttributeConsumingService"`
+}
+
+// AttributeConsumingService represents the SAML AttributeConsumingService object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.4.4.1
+type AttributeConsumingService struct {
+	Index               int                  `xml:"index,attr"`
+	IsDefault           *bool                `xml:"isDefault,attr"`
+	ServiceNames        []LocalizedName      `xml:"ServiceName"`
+	ServiceDescriptions []LocalizedName      `xml:"ServiceDescription"`
+	RequestedAttributes []RequestedAttribute `xml:"RequestedAttribute"`
+}
+
+// RequestedAttribute represents the SAML RequestedAttribute object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.4.4.2
+type RequestedAttribute struct {
+	Attribute
+	IsRequired *bool `xml:"isRequired,attr"`
+}
+
+// AuthnAuthorityDescriptor represents the SAML AuthnAuthorityDescriptor object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.4.5
+type AuthnAuthorityDescriptor struct {
+	RoleDescriptor
+	AuthnQueryServices         []Endpoint     `xml:"AuthnQueryService"`
+	AssertionIDRequestServices []Endpoint     `xml:"AssertionIDRequestService"`
+	NameIDFormats              []NameIDFormat `xml:"NameIDFormat"`
+}
+
+// PDPDescriptor represents the SAML PDPDescriptor object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.4.6
+type PDPDescriptor struct {
+	RoleDescriptor
+	AuthzServices              []Endpoint     `xml:"AuthzService"`
+	AssertionIDRequestServices []Endpoint     `xml:"AssertionIDRequestService"`
+	NameIDFormats              []NameIDFormat `xml:"NameIDFormat"`
+}
+
+// AttributeAuthorityDescriptor represents the SAML AttributeAuthorityDescriptor object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.4.7
+type AttributeAuthorityDescriptor struct {
+	RoleDescriptor
+	AttributeServices          []Endpoint     `xml:"AttributeService"`
+	AssertionIDRequestServices []Endpoint     `xml:"AssertionIDRequestService"`
+	NameIDFormats              []NameIDFormat `xml:"NameIDFormat"`
+	AttributeProfiles          []string       `xml:"AttributeProfile"`
+	Attributes                 []Attribute    `xml:"Attribute"`
+}
+
+// AffiliationDescriptor represents the SAML AffiliationDescriptor object.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf §2.5
+type AffiliationDescriptor struct {
+	AffiliationOwnerID string        `xml:"affiliationOwnerID,attr"`
+	ID                 string        `xml:",attr"`
+	ValidUntil         time.Time     `xml:"validUntil,attr,omitempty"`
+	CacheDuration      time.Duration `xml:"cacheDuration,attr"`
+	Signature          *etree.Element
+	AffiliateMembers   []string        `xml:"AffiliateMember"`
+	KeyDescriptors     []KeyDescriptor `xml:"KeyDescriptor"`
+}
diff --git a/vendor/github.com/crewjam/saml/saml.go b/vendor/github.com/crewjam/saml/saml.go
new file mode 100644
index 0000000000000000000000000000000000000000..11f184b05e12cc03c392e77e7869deccc7a04991
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/saml.go
@@ -0,0 +1,113 @@
+// Package saml contains a partial implementation of the SAML standard in golang.
+// SAML is a standard for identity federation, i.e. either allowing a third party to authenticate your users or allowing third parties to rely on us to authenticate their users.
+//
+// In SAML parlance an Identity Provider (IDP) is a service that knows how to authenticate users. A Service Provider (SP) is a service that delegates authentication to an IDP. If you are building a service where users log in with someone else's credentials, then you are a Service Provider. This package supports implementing both service providers and identity providers.
+//
+// The core package contains the implementation of SAML. The package samlsp provides helper middleware suitable for use in Service Provider applications. The package samlidp provides a rudimentary IDP service that is useful for testing or as a starting point for other integrations.
+//
+// Getting Started as a Service Provider
+//
+// Let us assume we have a simple web application to protect. We'll modify this application so it uses SAML to authenticate users.
+//
+//     package main
+//
+//     import (
+//         "fmt"
+//         "net/http"
+//     )
+//
+//     func hello(w http.ResponseWriter, r *http.Request) {
+//         fmt.Fprintf(w, "Hello, World!")
+//     }
+//
+//     func main() {
+//         app := http.HandlerFunc(hello)
+//         http.Handle("/hello", app)
+//         http.ListenAndServe(":8000", nil)
+//     }
+//
+// Each service provider must have a self-signed X.509 key pair established. You can generate your own with something like this:
+//
+//     openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com"
+//
+// We will use `samlsp.Middleware` to wrap the endpoint we want to protect. Middleware provides both an `http.Handler` to serve the SAML specific URLs and a set of wrappers to require the user to be logged in. We also provide the URL where the service provider can fetch the metadata from the IDP at startup. In our case, we'll use [testshib.org](testshib.org), an identity provider designed for testing.
+//
+//     package main
+//
+//     import (
+//         "fmt"
+//         "io/ioutil"
+//         "net/http"
+//
+//         "github.com/crewjam/saml/samlsp"
+//     )
+//
+//     func hello(w http.ResponseWriter, r *http.Request) {
+//         fmt.Fprintf(w, "Hello, %s!", r.Header.Get("X-Saml-Cn"))
+//     }
+//
+//     func main() {
+//         key, _ := ioutil.ReadFile("myservice.key")
+//         cert, _ := ioutil.ReadFile("myservice.cert")
+//         samlSP, _ := samlsp.New(samlsp.Options{
+//             IDPMetadataURL: "https://www.testshib.org/metadata/testshib-providers.xml",
+//             URL:            "http://localhost:8000",
+//             Key:            string(key),
+//             Certificate:    string(cert),
+//         })
+//         app := http.HandlerFunc(hello)
+//         http.Handle("/hello", samlSP.RequireAccount(app))
+//         http.Handle("/saml/", samlSP)
+//         http.ListenAndServe(":8000", nil)
+//     }
+//
+//
+// Next we'll have to register our service provider with the identity provider to establish trust from the service provider to the IDP. For [testshib.org](testshib.org), you can do something like:
+//
+//     mdpath=saml-test-$USER-$HOST.xml
+//     curl localhost:8000/saml/metadata > $mdpath
+//     curl -i -F userfile=@$mdpath https://www.testshib.org/procupload.php
+//
+// Now you should be able to authenticate. The flow should look like this:
+//
+// 1. You browse to `localhost:8000/hello`
+//
+// 2. The middleware redirects you to `https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO`
+//
+// 3. testshib.org prompts you for a username and password.
+//
+// 4. testshib.org returns an HTML document containing an HTML form set up to POST to `localhost:8000/saml/acs`. The form is submitted automatically if you have JavaScript enabled.
+//
+// 5. The local service validates the response, issues a session cookie, and redirects you to the original URL, `localhost:8000/hello`.
+//
+// 6. This time when `localhost:8000/hello` is requested there is a valid session and so the main content is served.
+//
+// Getting Started as an Identity Provider
+//
+// Please see `examples/idp/` for a substantially complete example of how to use the library and helpers to be an identity provider.
+//
+// Support
+//
+// The SAML standard is huge and complex with many dark corners and strange, unused features. This package implements the most commonly used subset of these features required to provide a single sign on experience. The package supports at least the subset of SAML known as [interoperable SAML](http://saml2int.org).
+//
+// This package supports the Web SSO profile. Message flows from the service provider to the IDP are supported using the HTTP Redirect binding and the HTTP POST binding. Message flows from the IDP to the service provider are supported via the HTTP POST binding.
+//
+// The package supports signed and encrypted SAML assertions. It does not support signed or encrypted requests.
+//
+// RelayState
+//
+// The *RelayState* parameter allows you to pass user state information across the authentication flow. The most common use for this is to allow a user to request a deep link into your site, be redirected through the SAML login flow, and upon successful completion, be directed to the originally requested link, rather than the root.
+//
+// Unfortunately, *RelayState* is less useful than it could be. Firstly, it is not authenticated, so anything you supply must be signed to avoid XSS or CSRF. Secondly, it is limited to 80 bytes in length, which precludes signing. (See section 3.6.3.1 of SAMLProfiles.)
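+//
+// As a rough illustration only (sp is assumed to be an already-configured
+// *ServiceProvider, and w/r belong to an http.Handler; keep the signing and
+// length caveats above in mind), the originally requested path can be
+// threaded through the flow as RelayState:
+//
+//     redirectURL, err := sp.MakeRedirectAuthenticationRequest(r.URL.Path)
+//     if err != nil {
+//         http.Error(w, "failed to build AuthnRequest", http.StatusInternalServerError)
+//         return
+//     }
+//     http.Redirect(w, r, redirectURL.String(), http.StatusFound)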
+//
+// References
+//
+// The SAML specification is a collection of PDFs (sadly):
+//
+// - [SAMLCore](http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf) defines data types.
+//
+// - [SAMLBindings](http://docs.oasis-open.org/security/saml/v2.0/saml-bindings-2.0-os.pdf) defines the details of the HTTP requests in play.
+//
+// - [SAMLProfiles](http://docs.oasis-open.org/security/saml/v2.0/saml-profiles-2.0-os.pdf) describes data flows.
+//
+// - [SAMLConformance](http://docs.oasis-open.org/security/saml/v2.0/saml-conformance-2.0-os.pdf) includes a support matrix for various parts of the protocol.
+//
+// [TestShib](http://www.testshib.org/) is a testing ground for SAML service and identity providers.
+package saml
diff --git a/vendor/github.com/crewjam/saml/saml_gen.go b/vendor/github.com/crewjam/saml/saml_gen.go
new file mode 100644
index 0000000000000000000000000000000000000000..c2bdb136241d9260cb3a19d4689e4119a5fe6938
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/saml_gen.go
@@ -0,0 +1,3 @@
+package saml
+
+//go:generate bash -c "(cat README.md | sed 's|^## ||g' | sed 's|\\*\\*||g' | sed 's|^|// |g'; echo 'package saml') > saml.go"
diff --git a/vendor/github.com/crewjam/saml/schema.go b/vendor/github.com/crewjam/saml/schema.go
new file mode 100644
index 0000000000000000000000000000000000000000..7ce5471c7e18912d038a0b12561d907658cedd2a
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/schema.go
@@ -0,0 +1,931 @@
+package saml
+
+import (
+	"encoding/xml"
+	"strconv"
+	"time"
+
+	"github.com/beevik/etree"
+	"github.com/russellhaering/goxmldsig/etreeutils"
+)
+
+// AuthnRequest represents the SAML object of the same name, a request from a service provider
+// to authenticate a user.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf
+type AuthnRequest struct {
+	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol AuthnRequest"`
+
+	ID           string    `xml:",attr"`
+	Version      string    `xml:",attr"`
+	IssueInstant time.Time `xml:",attr"`
+	Destination  string    `xml:",attr"`
+	Consent      string    `xml:",attr"`
+	Issuer       *Issuer   `xml:"urn:oasis:names:tc:SAML:2.0:assertion Issuer"`
+	Signature    *etree.Element
+
+	Subject      *Subject
+	NameIDPolicy *NameIDPolicy `xml:"urn:oasis:names:tc:SAML:2.0:protocol NameIDPolicy"`
+	Conditions   *Conditions
+	//RequestedAuthnContext *RequestedAuthnContext // TODO
+	//Scoping               *Scoping // TODO
+
+	ForceAuthn                     *bool  `xml:",attr"`
+	IsPassive                      *bool  `xml:",attr"`
+	AssertionConsumerServiceIndex  string `xml:",attr"`
+	AssertionConsumerServiceURL    string `xml:",attr"`
+	ProtocolBinding                string `xml:",attr"`
+	AttributeConsumingServiceIndex string `xml:",attr"`
+	ProviderName                   string `xml:",attr"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (r *AuthnRequest) Element() *etree.Element {
+	el := etree.NewElement("samlp:AuthnRequest")
+	el.CreateAttr("xmlns:saml", "urn:oasis:names:tc:SAML:2.0:assertion")
+	el.CreateAttr("xmlns:samlp", "urn:oasis:names:tc:SAML:2.0:protocol")
+	el.CreateAttr("ID", r.ID)
+	el.CreateAttr("Version", r.Version)
+	el.CreateAttr("IssueInstant", r.IssueInstant.Format(timeFormat))
+	if r.Destination != "" {
+		el.CreateAttr("Destination", r.Destination)
+	}
+	if r.Consent != "" {
+		el.CreateAttr("Consent", r.Consent)
+	}
+	if r.Issuer != nil {
+		el.AddChild(r.Issuer.Element())
+	}
+	if r.Signature != nil {
+		el.AddChild(r.Signature)
+	}
+	if r.Subject != nil {
+		el.AddChild(r.Subject.Element())
+	}
+	if r.NameIDPolicy != nil {
+		el.AddChild(r.NameIDPolicy.Element())
+	}
+	if r.Conditions != nil {
+		el.AddChild(r.Conditions.Element())
+	}
+	//if r.RequestedAuthnContext != nil {
+	//	el.AddChild(r.RequestedAuthnContext.Element())
+	//}
+	//if r.Scoping != nil {
+	//	el.AddChild(r.Scoping.Element())
+	//}
+	if r.ForceAuthn != nil {
+		el.CreateAttr("ForceAuthn", strconv.FormatBool(*r.ForceAuthn))
+	}
+	if r.IsPassive != nil {
+		el.CreateAttr("IsPassive", strconv.FormatBool(*r.IsPassive))
+	}
+	if r.AssertionConsumerServiceIndex != "" {
+		el.CreateAttr("AssertionConsumerServiceIndex", r.AssertionConsumerServiceIndex)
+	}
+	if r.AssertionConsumerServiceURL != "" {
+		el.CreateAttr("AssertionConsumerServiceURL", r.AssertionConsumerServiceURL)
+	}
+	if r.ProtocolBinding != "" {
+		el.CreateAttr("ProtocolBinding", r.ProtocolBinding)
+	}
+	if r.AttributeConsumingServiceIndex != "" {
+		el.CreateAttr("AttributeConsumingServiceIndex", r.AttributeConsumingServiceIndex)
+	}
+	if r.ProviderName != "" {
+		el.CreateAttr("ProviderName", r.ProviderName)
+	}
+	return el
+}
+
+// MarshalXML implements xml.Marshaler
+func (a *AuthnRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type Alias AuthnRequest
+	aux := &struct {
+		IssueInstant RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		IssueInstant: RelaxedTime(a.IssueInstant),
+		Alias:        (*Alias)(a),
+	}
+	return e.Encode(aux)
+}
+
+// UnmarshalXML implements xml.Unmarshaler
+func (a *AuthnRequest) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type Alias AuthnRequest
+	aux := &struct {
+		IssueInstant RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		Alias: (*Alias)(a),
+	}
+	if err := d.DecodeElement(&aux, &start); err != nil {
+		return err
+	}
+	a.IssueInstant = time.Time(aux.IssueInstant)
+	return nil
+}
+
+// Issuer represents the SAML object of the same name.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf
+type Issuer struct {
+	XMLName         xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Issuer"`
+	NameQualifier   string   `xml:",attr"`
+	SPNameQualifier string   `xml:",attr"`
+	Format          string   `xml:",attr"`
+	SPProvidedID    string   `xml:",attr"`
+	Value           string   `xml:",chardata"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *Issuer) Element() *etree.Element {
+	el := etree.NewElement("saml:Issuer")
+	if a.NameQualifier != "" {
+		el.CreateAttr("NameQualifier", a.NameQualifier)
+	}
+	if a.SPNameQualifier != "" {
+		el.CreateAttr("SPNameQualifier", a.SPNameQualifier)
+	}
+	if a.Format != "" {
+		el.CreateAttr("Format", a.Format)
+	}
+	if a.SPProvidedID != "" {
+		el.CreateAttr("SPProvidedID", a.SPProvidedID)
+	}
+	el.SetText(a.Value)
+	return el
+}
+
+// NameIDPolicy represents the SAML object of the same name.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf
+type NameIDPolicy struct {
+	XMLName         xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol NameIDPolicy"`
+	Format          *string  `xml:",attr"`
+	SPNameQualifier *string  `xml:",attr"`
+	AllowCreate     *bool    `xml:",attr"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *NameIDPolicy) Element() *etree.Element {
+	el := etree.NewElement("samlp:NameIDPolicy")
+	if a.Format != nil {
+		el.CreateAttr("Format", *a.Format)
+	}
+	if a.SPNameQualifier != nil {
+		el.CreateAttr("SPNameQualifier", *a.SPNameQualifier)
+	}
+	if a.AllowCreate != nil {
+		el.CreateAttr("AllowCreate", strconv.FormatBool(*a.AllowCreate))
+	}
+	return el
+}
+
+// Response represents the SAML object of the same name.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf
+type Response struct {
+	XMLName      xml.Name  `xml:"urn:oasis:names:tc:SAML:2.0:protocol Response"`
+	ID           string    `xml:",attr"`
+	InResponseTo string    `xml:",attr"`
+	Version      string    `xml:",attr"`
+	IssueInstant time.Time `xml:",attr"`
+	Destination  string    `xml:",attr"`
+	Consent      string    `xml:",attr"`
+	Issuer       *Issuer   `xml:"urn:oasis:names:tc:SAML:2.0:assertion Issuer"`
+	Signature    *etree.Element
+	Status       Status `xml:"urn:oasis:names:tc:SAML:2.0:protocol Status"`
+
+	// TODO(ross): more than one EncryptedAssertion is allowed
+	EncryptedAssertion *etree.Element `xml:"urn:oasis:names:tc:SAML:2.0:assertion EncryptedAssertion"`
+
+	// TODO(ross): more than one Assertion is allowed
+	Assertion *Assertion `xml:"urn:oasis:names:tc:SAML:2.0:assertion Assertion"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (r *Response) Element() *etree.Element {
+	el := etree.NewElement("samlp:Response")
+	el.CreateAttr("xmlns:saml", "urn:oasis:names:tc:SAML:2.0:assertion")
+	el.CreateAttr("xmlns:samlp", "urn:oasis:names:tc:SAML:2.0:protocol")
+
+	// Note: This namespace is not used by any element or attribute name, but
+	// is required so that the AttributeValue type element can have a value like
+	// "xs:string". If we don't declare it here, then it will be stripped by the
+	// canonicalizer. This could be avoided by providing a prefix list to the
+	// canonicalizer, but prefix lists do not appear to be implemented correctly
+	// in some libraries, so the safest action is to always produce XML that is
+	// (a) in canonical form and (b) does not require prefix lists.
+	el.CreateAttr("xmlns:xs", "http://www.w3.org/2001/XMLSchema")
+
+	el.CreateAttr("ID", r.ID)
+	if r.InResponseTo != "" {
+		el.CreateAttr("InResponseTo", r.InResponseTo)
+	}
+	el.CreateAttr("Version", r.Version)
+	el.CreateAttr("IssueInstant", r.IssueInstant.Format(timeFormat))
+	if r.Destination != "" {
+		el.CreateAttr("Destination", r.Destination)
+	}
+	if r.Consent != "" {
+		el.CreateAttr("Consent", r.Consent)
+	}
+	if r.Issuer != nil {
+		el.AddChild(r.Issuer.Element())
+	}
+	if r.Signature != nil {
+		el.AddChild(r.Signature)
+	}
+	el.AddChild(r.Status.Element())
+	if r.EncryptedAssertion != nil {
+		el.AddChild(r.EncryptedAssertion)
+	}
+	if r.Assertion != nil {
+		el.AddChild(r.Assertion.Element())
+	}
+	return el
+}
+
+// MarshalXML implements xml.Marshaler
+func (r *Response) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type Alias Response
+	aux := &struct {
+		IssueInstant RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		IssueInstant: RelaxedTime(r.IssueInstant),
+		Alias:        (*Alias)(r),
+	}
+	return e.Encode(aux)
+}
+
+// UnmarshalXML implements xml.Unmarshaler
+func (r *Response) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type Alias Response
+	aux := &struct {
+		IssueInstant RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		Alias: (*Alias)(r),
+	}
+	if err := d.DecodeElement(&aux, &start); err != nil {
+		return err
+	}
+	r.IssueInstant = time.Time(aux.IssueInstant)
+	return nil
+}
+
+// Status represents the SAML object of the same name.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf
+type Status struct {
+	XMLName       xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol Status"`
+	StatusCode    StatusCode
+	StatusMessage *StatusMessage
+	StatusDetail  *StatusDetail
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (s *Status) Element() *etree.Element {
+	el := etree.NewElement("samlp:Status")
+	el.AddChild(s.StatusCode.Element())
+	if s.StatusMessage != nil {
+		el.AddChild(s.StatusMessage.Element())
+	}
+	if s.StatusDetail != nil {
+		el.AddChild(s.StatusDetail.Element())
+	}
+	return el
+}
+
+// StatusCode represents the SAML object of the same name.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf
+type StatusCode struct {
+	XMLName    xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol StatusCode"`
+	Value      string   `xml:",attr"`
+	StatusCode *StatusCode
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (s *StatusCode) Element() *etree.Element {
+	el := etree.NewElement("samlp:StatusCode")
+	el.CreateAttr("Value", s.Value)
+	if s.StatusCode != nil {
+		el.AddChild(s.StatusCode.Element())
+	}
+	return el
+}
+
+// StatusSuccess means the request succeeded. Additional information MAY be returned in the <StatusMessage> and/or <StatusDetail> elements.
+//
+// TODO(ross): this value is mostly constant, but is mutated in tests. Fix the hacky test so this can be const.
+var StatusSuccess = "urn:oasis:names:tc:SAML:2.0:status:Success"
+
+const (
+	// The permissible top-level <StatusCode> values are as follows:
+
+	// StatusRequester means the request could not be performed due to an error on the part of the requester.
+	StatusRequester = "urn:oasis:names:tc:SAML:2.0:status:Requester"
+
+	// StatusResponder means the request could not be performed due to an error on the part of the SAML responder or SAML authority.
+	StatusResponder = "urn:oasis:names:tc:SAML:2.0:status:Responder"
+
+	// StatusVersionMismatch means the SAML responder could not process the request because the version of the request message was incorrect.
+	StatusVersionMismatch = "urn:oasis:names:tc:SAML:2.0:status:VersionMismatch"
+
+	// The following second-level status codes are referenced at various places in this specification. Additional
+	// second-level status codes MAY be defined in future versions of the SAML specification. System entities
+	// are free to define more specific status codes by defining appropriate URI references.
+
+	// StatusAuthnFailed means the responding provider was unable to successfully authenticate the principal.
+	StatusAuthnFailed = "urn:oasis:names:tc:SAML:2.0:status:AuthnFailed"
+
+	// StatusInvalidAttrNameOrValue means unexpected or invalid content was encountered within a <saml:Attribute> or <saml:AttributeValue> element.
+	StatusInvalidAttrNameOrValue = "urn:oasis:names:tc:SAML:2.0:status:InvalidAttrNameOrValue"
+
+	// StatusInvalidNameIDPolicy means the responding provider cannot or will not support the requested name identifier policy.
+	StatusInvalidNameIDPolicy = "urn:oasis:names:tc:SAML:2.0:status:InvalidNameIDPolicy"
+
+	// StatusNoAuthnContext means the specified authentication context requirements cannot be met by the responder.
+	StatusNoAuthnContext = "urn:oasis:names:tc:SAML:2.0:status:NoAuthnContext"
+
+	// StatusNoAvailableIDP is used by an intermediary to indicate that none of the supported identity provider <Loc> elements in an <IDPList> can be resolved or that none of the supported identity providers are available.
+	StatusNoAvailableIDP = "urn:oasis:names:tc:SAML:2.0:status:NoAvailableIDP"
+
+	// StatusNoPassive means the responding provider cannot authenticate the principal passively, as has been requested.
+	StatusNoPassive = "urn:oasis:names:tc:SAML:2.0:status:NoPassive"
+
+	// StatusNoSupportedIDP is used by an intermediary to indicate that none of the identity providers in an <IDPList> are supported by the intermediary.
+	StatusNoSupportedIDP = "urn:oasis:names:tc:SAML:2.0:status:NoSupportedIDP"
+
+	// StatusPartialLogout is used by a session authority to indicate to a session participant that it was not able to propagate logout to all other session participants.
+	StatusPartialLogout = "urn:oasis:names:tc:SAML:2.0:status:PartialLogout"
+
+	// StatusProxyCountExceeded means a responding provider cannot authenticate the principal directly and is not permitted to proxy the request further.
+	StatusProxyCountExceeded = "urn:oasis:names:tc:SAML:2.0:status:ProxyCountExceeded"
+
+	// StatusRequestDenied means the SAML responder or SAML authority is able to process the request but has chosen not to respond. This status code MAY be used when there is concern about the security context of the request message or the sequence of request messages received from a particular requester.
+	StatusRequestDenied = "urn:oasis:names:tc:SAML:2.0:status:RequestDenied"
+
+	// StatusRequestUnsupported means the SAML responder or SAML authority does not support the request.
+	StatusRequestUnsupported = "urn:oasis:names:tc:SAML:2.0:status:RequestUnsupported"
+
+	// StatusRequestVersionDeprecated means the SAML responder cannot process any requests with the protocol version specified in the request.
+	StatusRequestVersionDeprecated = "urn:oasis:names:tc:SAML:2.0:status:RequestVersionDeprecated"
+
+	// StatusRequestVersionTooHigh means the SAML responder cannot process the request because the protocol version specified in the request message is a major upgrade from the highest protocol version supported by the responder.
+	StatusRequestVersionTooHigh = "urn:oasis:names:tc:SAML:2.0:status:RequestVersionTooHigh"
+
+	// StatusRequestVersionTooLow means the SAML responder cannot process the request because the protocol version specified in the request message is too low.
+	StatusRequestVersionTooLow = "urn:oasis:names:tc:SAML:2.0:status:RequestVersionTooLow"
+
+	// StatusResourceNotRecognized means the resource value provided in the request message is invalid or unrecognized.
+	StatusResourceNotRecognized = "urn:oasis:names:tc:SAML:2.0:status:ResourceNotRecognized"
+
+	// StatusTooManyResponses means the response message would contain more elements than the SAML responder is able to return.
+	StatusTooManyResponses = "urn:oasis:names:tc:SAML:2.0:status:TooManyResponses"
+
+	// StatusUnknownAttrProfile means an entity that has no knowledge of a particular attribute profile has been presented with an attribute drawn from that profile.
+	StatusUnknownAttrProfile = "urn:oasis:names:tc:SAML:2.0:status:UnknownAttrProfile"
+
+	// StatusUnknownPrincipal means the responding provider does not recognize the principal specified or implied by the request.
+	StatusUnknownPrincipal = "urn:oasis:names:tc:SAML:2.0:status:UnknownPrincipal"
+
+	// StatusUnsupportedBinding means the SAML responder cannot properly fulfill the request using the protocol binding specified in the request.
+	StatusUnsupportedBinding = "urn:oasis:names:tc:SAML:2.0:status:UnsupportedBinding"
+)
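+
+// As an illustrative sketch only (rawResponseXML stands in for the decoded
+// SAMLResponse bytes; it is not part of this package), these status URIs are
+// typically compared against the Status of an unmarshaled Response:
+//
+//     var resp Response
+//     if err := xml.Unmarshal(rawResponseXML, &resp); err != nil {
+//         // handle the parse error
+//     }
+//     if resp.Status.StatusCode.Value != StatusSuccess {
+//         // the IDP rejected the request; a nested StatusCode such as
+//         // StatusAuthnFailed may describe why
+//     }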
+
+// StatusMessage represents the SAML element StatusMessage.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §3.2.2.3
+type StatusMessage struct {
+	Value string
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (sm StatusMessage) Element() *etree.Element {
+	el := etree.NewElement("samlp:StatusMessage")
+	el.SetText(sm.Value)
+	return el
+}
+
+// StatusDetail represents the SAML element StatusDetail.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §3.2.2.4
+type StatusDetail struct {
+	Children []*etree.Element
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (sm StatusDetail) Element() *etree.Element {
+	el := etree.NewElement("samlp:StatusDetail")
+	for _, child := range sm.Children {
+		el.AddChild(child)
+	}
+	return el
+}
+
+// Assertion represents the SAML element Assertion.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.3.3
+type Assertion struct {
+	XMLName      xml.Name  `xml:"urn:oasis:names:tc:SAML:2.0:assertion Assertion"`
+	ID           string    `xml:",attr"`
+	IssueInstant time.Time `xml:",attr"`
+	Version      string    `xml:",attr"`
+	Issuer       Issuer    `xml:"urn:oasis:names:tc:SAML:2.0:assertion Issuer"`
+	Signature    *etree.Element
+	Subject      *Subject
+	Conditions   *Conditions
+	// Advice *Advice
+	// Statements []Statement
+	AuthnStatements []AuthnStatement `xml:"AuthnStatement"`
+	// AuthzDecisionStatements []AuthzDecisionStatement
+	AttributeStatements []AttributeStatement `xml:"AttributeStatement"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *Assertion) Element() *etree.Element {
+	el := etree.NewElement("saml:Assertion")
+	el.CreateAttr("xmlns:saml", "urn:oasis:names:tc:SAML:2.0:assertion")
+	el.CreateAttr("Version", "2.0")
+	el.CreateAttr("ID", a.ID)
+	el.CreateAttr("IssueInstant", a.IssueInstant.Format(timeFormat))
+	el.AddChild(a.Issuer.Element())
+	if a.Signature != nil {
+		el.AddChild(a.Signature)
+	}
+	if a.Subject != nil {
+		el.AddChild(a.Subject.Element())
+	}
+	if a.Conditions != nil {
+		el.AddChild(a.Conditions.Element())
+	}
+	for _, authnStatement := range a.AuthnStatements {
+		el.AddChild(authnStatement.Element())
+	}
+	for _, attributeStatement := range a.AttributeStatements {
+		el.AddChild(attributeStatement.Element())
+	}
+	err := etreeutils.TransformExcC14n(el, canonicalizerPrefixList)
+	if err != nil {
+		panic(err)
+	}
+	return el
+}
+
+// UnmarshalXML implements xml.Unmarshaler
+func (a *Assertion) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type Alias Assertion
+	aux := &struct {
+		IssueInstant RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		Alias: (*Alias)(a),
+	}
+	if err := d.DecodeElement(&aux, &start); err != nil {
+		return err
+	}
+	a.IssueInstant = time.Time(aux.IssueInstant)
+	return nil
+}
+
+// Subject represents the SAML element Subject.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.4.1
+type Subject struct {
+	XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Subject"`
+	// BaseID               *BaseID  ... TODO
+	NameID *NameID
+	// EncryptedID          *EncryptedID  ... TODO
+	SubjectConfirmations []SubjectConfirmation `xml:"SubjectConfirmation"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *Subject) Element() *etree.Element {
+	el := etree.NewElement("saml:Subject")
+	if a.NameID != nil {
+		el.AddChild(a.NameID.Element())
+	}
+	for _, v := range a.SubjectConfirmations {
+		el.AddChild(v.Element())
+	}
+	return el
+}
+
+// NameID represents the SAML element NameID.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.2.3
+type NameID struct {
+	NameQualifier   string `xml:",attr"`
+	SPNameQualifier string `xml:",attr"`
+	Format          string `xml:",attr"`
+	SPProvidedID    string `xml:",attr"`
+	Value           string `xml:",chardata"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *NameID) Element() *etree.Element {
+	el := etree.NewElement("saml:NameID")
+	if a.NameQualifier != "" {
+		el.CreateAttr("NameQualifier", a.NameQualifier)
+	}
+	if a.SPNameQualifier != "" {
+		el.CreateAttr("SPNameQualifier", a.SPNameQualifier)
+	}
+	if a.Format != "" {
+		el.CreateAttr("Format", a.Format)
+	}
+	if a.SPProvidedID != "" {
+		el.CreateAttr("SPProvidedID", a.SPProvidedID)
+	}
+	if a.Value != "" {
+		el.SetText(a.Value)
+	}
+	return el
+}
+
+// SubjectConfirmation represents the SAML element SubjectConfirmation.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.4.1.1
+type SubjectConfirmation struct {
+	Method string `xml:",attr"`
+	// BaseID               *BaseID  ... TODO
+	NameID *NameID
+	// EncryptedID          *EncryptedID  ... TODO
+	SubjectConfirmationData *SubjectConfirmationData
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *SubjectConfirmation) Element() *etree.Element {
+	el := etree.NewElement("saml:SubjectConfirmation")
+	el.CreateAttr("Method", a.Method)
+	if a.NameID != nil {
+		el.AddChild(a.NameID.Element())
+	}
+	if a.SubjectConfirmationData != nil {
+		el.AddChild(a.SubjectConfirmationData.Element())
+	}
+	return el
+}
+
+// SubjectConfirmationData represents the SAML element SubjectConfirmationData.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.4.1.2
+type SubjectConfirmationData struct {
+	NotBefore    time.Time `xml:",attr"`
+	NotOnOrAfter time.Time `xml:",attr"`
+	Recipient    string    `xml:",attr"`
+	InResponseTo string    `xml:",attr"`
+	Address      string    `xml:",attr"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (s *SubjectConfirmationData) Element() *etree.Element {
+	el := etree.NewElement("saml:SubjectConfirmationData")
+	if !s.NotBefore.IsZero() {
+		el.CreateAttr("NotBefore", s.NotBefore.Format(timeFormat))
+	}
+	if !s.NotOnOrAfter.IsZero() {
+		el.CreateAttr("NotOnOrAfter", s.NotOnOrAfter.Format(timeFormat))
+	}
+	if s.Recipient != "" {
+		el.CreateAttr("Recipient", s.Recipient)
+	}
+	if s.InResponseTo != "" {
+		el.CreateAttr("InResponseTo", s.InResponseTo)
+	}
+	if s.Address != "" {
+		el.CreateAttr("Address", s.Address)
+	}
+	return el
+}
+
+// MarshalXML implements xml.Marshaler
+func (s *SubjectConfirmationData) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type Alias SubjectConfirmationData
+	aux := &struct {
+		NotOnOrAfter RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		NotOnOrAfter: RelaxedTime(s.NotOnOrAfter),
+		Alias:        (*Alias)(s),
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler
+func (s *SubjectConfirmationData) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type Alias SubjectConfirmationData
+	aux := &struct {
+		NotOnOrAfter RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		Alias: (*Alias)(s),
+	}
+	if err := d.DecodeElement(&aux, &start); err != nil {
+		return err
+	}
+	s.NotOnOrAfter = time.Time(aux.NotOnOrAfter)
+	return nil
+}
+
+// Conditions represents the SAML element Conditions.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.5.1
+type Conditions struct {
+	NotBefore            time.Time             `xml:",attr"`
+	NotOnOrAfter         time.Time             `xml:",attr"`
+	AudienceRestrictions []AudienceRestriction `xml:"AudienceRestriction"`
+	OneTimeUse           *OneTimeUse
+	ProxyRestriction     *ProxyRestriction
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (c *Conditions) Element() *etree.Element {
+	el := etree.NewElement("saml:Conditions")
+	if !c.NotBefore.IsZero() {
+		el.CreateAttr("NotBefore", c.NotBefore.Format(timeFormat))
+	}
+	if !c.NotOnOrAfter.IsZero() {
+		el.CreateAttr("NotOnOrAfter", c.NotOnOrAfter.Format(timeFormat))
+	}
+	for _, v := range c.AudienceRestrictions {
+		el.AddChild(v.Element())
+	}
+	if c.OneTimeUse != nil {
+		el.AddChild(c.OneTimeUse.Element())
+	}
+	if c.ProxyRestriction != nil {
+		el.AddChild(c.ProxyRestriction.Element())
+	}
+	return el
+}
+
+// MarshalXML implements xml.Marshaler
+func (c *Conditions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type Alias Conditions
+	aux := &struct {
+		NotBefore    RelaxedTime `xml:",attr"`
+		NotOnOrAfter RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		NotBefore:    RelaxedTime(c.NotBefore),
+		NotOnOrAfter: RelaxedTime(c.NotOnOrAfter),
+		Alias:        (*Alias)(c),
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler
+func (c *Conditions) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type Alias Conditions
+	aux := &struct {
+		NotBefore    RelaxedTime `xml:",attr"`
+		NotOnOrAfter RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		Alias: (*Alias)(c),
+	}
+	if err := d.DecodeElement(&aux, &start); err != nil {
+		return err
+	}
+	c.NotBefore = time.Time(aux.NotBefore)
+	c.NotOnOrAfter = time.Time(aux.NotOnOrAfter)
+	return nil
+}
+
+// AudienceRestriction represents the SAML element AudienceRestriction.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.5.1.4
+type AudienceRestriction struct {
+	Audience Audience
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *AudienceRestriction) Element() *etree.Element {
+	el := etree.NewElement("saml:AudienceRestriction")
+	el.AddChild(a.Audience.Element())
+	return el
+}
+
+// Audience represents the SAML element Audience.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.5.1.4
+type Audience struct {
+	Value string `xml:",chardata"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *Audience) Element() *etree.Element {
+	el := etree.NewElement("saml:Audience")
+	el.SetText(a.Value)
+	return el
+}
+
+// OneTimeUse represents the SAML element OneTimeUse.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.5.1.5
+type OneTimeUse struct{}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *OneTimeUse) Element() *etree.Element {
+	return etree.NewElement("saml:OneTimeUse")
+}
+
+// ProxyRestriction represents the SAML element ProxyRestriction.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.5.1.6
+type ProxyRestriction struct {
+	Count     *int
+	Audiences []Audience
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *ProxyRestriction) Element() *etree.Element {
+	el := etree.NewElement("saml:ProxyRestriction")
+	if a.Count != nil {
+		el.CreateAttr("Count", strconv.Itoa(*a.Count))
+	}
+	for _, v := range a.Audiences {
+		el.AddChild(v.Element())
+	}
+	return el
+}
+
+// AuthnStatement represents the SAML element AuthnStatement.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.7.2
+type AuthnStatement struct {
+	AuthnInstant        time.Time  `xml:",attr"`
+	SessionIndex        string     `xml:",attr"`
+	SessionNotOnOrAfter *time.Time `xml:",attr"`
+	SubjectLocality     *SubjectLocality
+	AuthnContext        AuthnContext
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *AuthnStatement) Element() *etree.Element {
+	el := etree.NewElement("saml:AuthnStatement")
+	el.CreateAttr("AuthnInstant", a.AuthnInstant.Format(timeFormat))
+	if a.SessionIndex != "" {
+		el.CreateAttr("SessionIndex", a.SessionIndex)
+	}
+	if a.SubjectLocality != nil {
+		el.AddChild(a.SubjectLocality.Element())
+	}
+	el.AddChild(a.AuthnContext.Element())
+	return el
+}
+
+// MarshalXML implements xml.Marshaler
+func (a *AuthnStatement) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type Alias AuthnStatement
+	aux := &struct {
+		AuthnInstant RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		AuthnInstant: RelaxedTime(a.AuthnInstant),
+		Alias:        (*Alias)(a),
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler
+func (a *AuthnStatement) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type Alias AuthnStatement
+	aux := &struct {
+		AuthnInstant RelaxedTime `xml:",attr"`
+		*Alias
+	}{
+		Alias: (*Alias)(a),
+	}
+	if err := d.DecodeElement(&aux, &start); err != nil {
+		return err
+	}
+	a.AuthnInstant = time.Time(aux.AuthnInstant)
+	return nil
+}
+
+// SubjectLocality represents the SAML element SubjectLocality.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.7.2.1
+type SubjectLocality struct {
+	Address string `xml:",attr"`
+	DNSName string `xml:",attr"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *SubjectLocality) Element() *etree.Element {
+	el := etree.NewElement("saml:SubjectLocality")
+	if a.Address != "" {
+		el.CreateAttr("Address", a.Address)
+	}
+	if a.DNSName != "" {
+		el.CreateAttr("DNSName", a.DNSName)
+	}
+	return el
+}
+
+// AuthnContext represents the SAML element AuthnContext.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.7.2.2
+type AuthnContext struct {
+	AuthnContextClassRef *AuthnContextClassRef
+	//AuthnContextDecl          *AuthnContextDecl        ... TODO
+	//AuthnContextDeclRef       *AuthnContextDeclRef     ... TODO
+	//AuthenticatingAuthorities []AuthenticatingAuthority... TODO
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *AuthnContext) Element() *etree.Element {
+	el := etree.NewElement("saml:AuthnContext")
+	if a.AuthnContextClassRef != nil {
+		el.AddChild(a.AuthnContextClassRef.Element())
+	}
+	return el
+}
+
+// AuthnContextClassRef represents the SAML element AuthnContextClassRef.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.7.2.2
+type AuthnContextClassRef struct {
+	Value string `xml:",chardata"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *AuthnContextClassRef) Element() *etree.Element {
+	el := etree.NewElement("saml:AuthnContextClassRef")
+	el.SetText(a.Value)
+	return el
+}
+
+// AttributeStatement represents the SAML element AttributeStatement.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.7.3
+type AttributeStatement struct {
+	Attributes []Attribute `xml:"Attribute"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *AttributeStatement) Element() *etree.Element {
+	el := etree.NewElement("saml:AttributeStatement")
+	for _, v := range a.Attributes {
+		el.AddChild(v.Element())
+	}
+	return el
+}
+
+// Attribute represents the SAML element Attribute.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.7.3.1
+type Attribute struct {
+	FriendlyName string           `xml:",attr"`
+	Name         string           `xml:",attr"`
+	NameFormat   string           `xml:",attr"`
+	Values       []AttributeValue `xml:"AttributeValue"`
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *Attribute) Element() *etree.Element {
+	el := etree.NewElement("saml:Attribute")
+	if a.FriendlyName != "" {
+		el.CreateAttr("FriendlyName", a.FriendlyName)
+	}
+	if a.Name != "" {
+		el.CreateAttr("Name", a.Name)
+	}
+	if a.NameFormat != "" {
+		el.CreateAttr("NameFormat", a.NameFormat)
+	}
+	for _, v := range a.Values {
+		el.AddChild(v.Element())
+	}
+	return el
+}
+
+// AttributeValue represents the SAML element AttributeValue.
+//
+// See http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf §2.7.3.1.1
+type AttributeValue struct {
+	Type   string `xml:"http://www.w3.org/2001/XMLSchema-instance type,attr"`
+	Value  string `xml:",chardata"`
+	NameID *NameID
+}
+
+// Element returns an etree.Element representing the object in XML form.
+func (a *AttributeValue) Element() *etree.Element {
+	el := etree.NewElement("saml:AttributeValue")
+	el.CreateAttr("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
+	el.CreateAttr("xmlns:xs", "http://www.w3.org/2001/XMLSchema")
+	el.CreateAttr("xsi:type", a.Type)
+	if a.NameID != nil {
+		el.AddChild(a.NameID.Element())
+	}
+	el.SetText(a.Value)
+	return el
+}
diff --git a/vendor/github.com/crewjam/saml/service_provider.go b/vendor/github.com/crewjam/saml/service_provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..151ff33811ae764f0192dcca3fb9ed93283efbf9
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/service_provider.go
@@ -0,0 +1,664 @@
+package saml
+
+import (
+	"bytes"
+	"compress/flate"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"html/template"
+	"net/http"
+	"net/url"
+	"regexp"
+	"time"
+
+	"github.com/beevik/etree"
+	"github.com/crewjam/saml/logger"
+	"github.com/crewjam/saml/xmlenc"
+	dsig "github.com/russellhaering/goxmldsig"
+	"github.com/russellhaering/goxmldsig/etreeutils"
+)
+
+// NameIDFormat is the format of a SAML NameID (e.g. transient or persistent).
+type NameIDFormat string
+
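+// Element returns an etree.Element representing the object in XML form.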
+func (n NameIDFormat) Element() *etree.Element {
+	el := etree.NewElement("")
+	el.SetText(string(n))
+	return el
+}
+
+// Name ID formats
+const (
+	UnspecifiedNameIDFormat  NameIDFormat = "urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified"
+	TransientNameIDFormat    NameIDFormat = "urn:oasis:names:tc:SAML:2.0:nameid-format:transient"
+	EmailAddressNameIDFormat NameIDFormat = "urn:oasis:names:tc:SAML:2.0:nameid-format:emailAddress"
+	PersistentNameIDFormat   NameIDFormat = "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"
+)
+
+// ServiceProvider implements SAML Service provider.
+//
+// In SAML, service providers delegate responsibility for identifying
+// clients to an identity provider. If you are writing an application
+// that uses passwords (or whatever) stored somewhere else, then you
+// are a service provider.
+//
+// See the example directory for an example of a web application using
+// the service provider interface.
+type ServiceProvider struct {
+	// Key is the RSA private key we use to sign requests.
+	Key *rsa.PrivateKey
+
+	// Certificate is the RSA public part of Key.
+	Certificate *x509.Certificate
+
+	// MetadataURL is the full URL to the metadata endpoint on this host,
+	// i.e. https://example.com/saml/metadata
+	MetadataURL url.URL
+
+	// AcsURL is the full URL to the SAML Assertion Consumer Service endpoint
+	// on this host, i.e. https://example.com/saml/acs
+	AcsURL url.URL
+
+	// IDPMetadata is the metadata from the identity provider.
+	IDPMetadata *EntityDescriptor
+
+	// AuthnNameIDFormat is the format used in the NameIDPolicy for
+	// authentication requests
+	AuthnNameIDFormat NameIDFormat
+
+	// MetadataValidDuration is a duration used to calculate validUntil
+	// attribute in the metadata endpoint
+	MetadataValidDuration time.Duration
+
+	// Logger is used to log messages for example in the event of errors
+	Logger logger.Interface
+
+	// ForceAuthn allows you to force re-authentication of users even if the user
+	// has a SSO session at the IdP.
+	ForceAuthn *bool
+}
+
+// MaxIssueDelay is the longest allowed time between when a SAML assertion is
+// issued by the IDP and the time it is received by ParseResponse. This is used
+// to prevent old responses from being replayed (while allowing for some clock
+// drift between the SP and IDP).
+const MaxIssueDelay = time.Second * 90
+
+// MaxClockSkew allows for leeway for clock skew between the IDP and SP when
+// validating assertions. It defaults to 180 seconds (matching Shibboleth's default).
+var MaxClockSkew = time.Second * 180
+
+// DefaultValidDuration is how long we assert that the SP metadata is valid.
+const DefaultValidDuration = time.Hour * 24 * 2
+
+// DefaultCacheDuration is how long we ask the IDP to cache the SP metadata.
+const DefaultCacheDuration = time.Hour * 24 * 1
+
+// Metadata returns the service provider metadata
+func (sp *ServiceProvider) Metadata() *EntityDescriptor {
+	validDuration := DefaultValidDuration
+	if sp.MetadataValidDuration > 0 {
+		validDuration = sp.MetadataValidDuration
+	}
+
+	authnRequestsSigned := false
+	wantAssertionsSigned := true
+	return &EntityDescriptor{
+		EntityID:   sp.MetadataURL.String(),
+		ValidUntil: TimeNow().Add(validDuration),
+
+		SPSSODescriptors: []SPSSODescriptor{
+			SPSSODescriptor{
+				SSODescriptor: SSODescriptor{
+					RoleDescriptor: RoleDescriptor{
+						ProtocolSupportEnumeration: "urn:oasis:names:tc:SAML:2.0:protocol",
+						KeyDescriptors: []KeyDescriptor{
+							{
+								Use: "signing",
+								KeyInfo: KeyInfo{
+									Certificate: base64.StdEncoding.EncodeToString(sp.Certificate.Raw),
+								},
+							},
+							{
+								Use: "encryption",
+								KeyInfo: KeyInfo{
+									Certificate: base64.StdEncoding.EncodeToString(sp.Certificate.Raw),
+								},
+								EncryptionMethods: []EncryptionMethod{
+									{Algorithm: "http://www.w3.org/2001/04/xmlenc#aes128-cbc"},
+									{Algorithm: "http://www.w3.org/2001/04/xmlenc#aes192-cbc"},
+									{Algorithm: "http://www.w3.org/2001/04/xmlenc#aes256-cbc"},
+									{Algorithm: "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"},
+								},
+							},
+						},
+					},
+				},
+				AuthnRequestsSigned:  &authnRequestsSigned,
+				WantAssertionsSigned: &wantAssertionsSigned,
+
+				AssertionConsumerServices: []IndexedEndpoint{
+					IndexedEndpoint{
+						Binding:  HTTPPostBinding,
+						Location: sp.AcsURL.String(),
+						Index:    1,
+					},
+				},
+			},
+		},
+	}
+}
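+
+// A minimal serving sketch (the route, handler wiring and the sp variable are
+// assumptions for illustration, not part of this package) for exposing the
+// metadata built above at the MetadataURL endpoint:
+//
+//     http.HandleFunc("/saml/metadata", func(w http.ResponseWriter, r *http.Request) {
+//         buf, err := xml.MarshalIndent(sp.Metadata(), "", "  ")
+//         if err != nil {
+//             http.Error(w, err.Error(), http.StatusInternalServerError)
+//             return
+//         }
+//         w.Header().Set("Content-Type", "application/samlmetadata+xml")
+//         w.Write(buf)
+//     })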
+
+// MakeRedirectAuthenticationRequest creates a SAML authentication request using
+// the HTTP-Redirect binding. It returns a URL that we will redirect the user to
+// in order to start the auth process.
+func (sp *ServiceProvider) MakeRedirectAuthenticationRequest(relayState string) (*url.URL, error) {
+	req, err := sp.MakeAuthenticationRequest(sp.GetSSOBindingLocation(HTTPRedirectBinding))
+	if err != nil {
+		return nil, err
+	}
+	return req.Redirect(relayState), nil
+}
+
+// Redirect returns a URL suitable for using the redirect binding with the request
+func (req *AuthnRequest) Redirect(relayState string) *url.URL {
+	w := &bytes.Buffer{}
+	w1 := base64.NewEncoder(base64.StdEncoding, w)
+	w2, _ := flate.NewWriter(w1, 9)
+	doc := etree.NewDocument()
+	doc.SetRoot(req.Element())
+	if _, err := doc.WriteTo(w2); err != nil {
+		panic(err)
+	}
+	w2.Close()
+	w1.Close()
+
+	rv, _ := url.Parse(req.Destination)
+
+	query := rv.Query()
+	query.Set("SAMLRequest", string(w.Bytes()))
+	if relayState != "" {
+		query.Set("RelayState", relayState)
+	}
+	rv.RawQuery = query.Encode()
+
+	return rv
+}
+
+// GetSSOBindingLocation returns URL for the IDP's Single Sign On Service binding
+// of the specified type (HTTPRedirectBinding or HTTPPostBinding)
+func (sp *ServiceProvider) GetSSOBindingLocation(binding string) string {
+	for _, idpSSODescriptor := range sp.IDPMetadata.IDPSSODescriptors {
+		for _, singleSignOnService := range idpSSODescriptor.SingleSignOnServices {
+			if singleSignOnService.Binding == binding {
+				return singleSignOnService.Location
+			}
+		}
+	}
+	return ""
+}
+
+// getIDPSigningCert returns the parsed certificate that can be used to verify
+// signatures produced by the IDP, or an error if no suitable certificate is
+// found in the IDP metadata.
+func (sp *ServiceProvider) getIDPSigningCert() (*x509.Certificate, error) {
+	certStr := ""
+	for _, idpSSODescriptor := range sp.IDPMetadata.IDPSSODescriptors {
+		for _, keyDescriptor := range idpSSODescriptor.KeyDescriptors {
+			if keyDescriptor.Use == "signing" {
+				certStr = keyDescriptor.KeyInfo.Certificate
+				break
+			}
+		}
+	}
+
+	// If no certs are explicitly marked for signing, just return the first
+	// non-empty cert we find.
+	if certStr == "" {
+		for _, idpSSODescriptor := range sp.IDPMetadata.IDPSSODescriptors {
+			for _, keyDescriptor := range idpSSODescriptor.KeyDescriptors {
+				if keyDescriptor.Use == "" && keyDescriptor.KeyInfo.Certificate != "" {
+					certStr = keyDescriptor.KeyInfo.Certificate
+					break
+				}
+			}
+		}
+	}
+
+	if certStr == "" {
+		return nil, errors.New("cannot find any signing certificate in the IDP SSO descriptor")
+	}
+
+	// cleanup whitespace
+	certStr = regexp.MustCompile(`\s+`).ReplaceAllString(certStr, "")
+	certBytes, err := base64.StdEncoding.DecodeString(certStr)
+	if err != nil {
+		return nil, fmt.Errorf("cannot parse certificate: %s", err)
+	}
+
+	parsedCert, err := x509.ParseCertificate(certBytes)
+	if err != nil {
+		return nil, err
+	}
+	return parsedCert, nil
+}
+
+// MakeAuthenticationRequest produces a new AuthnRequest object for idpURL.
+func (sp *ServiceProvider) MakeAuthenticationRequest(idpURL string) (*AuthnRequest, error) {
+	var nameIDFormat string
+	switch sp.AuthnNameIDFormat {
+	case "":
+		// To maintain library back-compat, use "transient" if unset.
+		nameIDFormat = string(TransientNameIDFormat)
+	case UnspecifiedNameIDFormat:
+		// Spec defines an empty value as "unspecified" so don't set one.
+	default:
+		nameIDFormat = string(sp.AuthnNameIDFormat)
+	}
+
+	allowCreate := true
+	req := AuthnRequest{
+		AssertionConsumerServiceURL: sp.AcsURL.String(),
+		Destination:                 idpURL,
+		ProtocolBinding:             HTTPPostBinding, // default binding for the response
+		ID:                          fmt.Sprintf("id-%x", randomBytes(20)),
+		IssueInstant:                TimeNow(),
+		Version:                     "2.0",
+		Issuer: &Issuer{
+			Format: "urn:oasis:names:tc:SAML:2.0:nameid-format:entity",
+			Value:  sp.MetadataURL.String(),
+		},
+		NameIDPolicy: &NameIDPolicy{
+			AllowCreate: &allowCreate,
+			// TODO(ross): figure out exactly policy we need
+			// urn:mace:shibboleth:1.0:nameIdentifier
+			// urn:oasis:names:tc:SAML:2.0:nameid-format:transient
+			Format: &nameIDFormat,
+		},
+		ForceAuthn: sp.ForceAuthn,
+	}
+	return &req, nil
+}
+
+// MakePostAuthenticationRequest creates a SAML authentication request using
+// the HTTP-POST binding. It returns HTML text representing an HTML form that
+// can be sent presented to a browser to initiate the login process.
+func (sp *ServiceProvider) MakePostAuthenticationRequest(relayState string) ([]byte, error) {
+	req, err := sp.MakeAuthenticationRequest(sp.GetSSOBindingLocation(HTTPPostBinding))
+	if err != nil {
+		return nil, err
+	}
+	return req.Post(relayState), nil
+}
+
+// Post returns an HTML form suitable for using the HTTP-POST binding with the request
+func (req *AuthnRequest) Post(relayState string) []byte {
+	doc := etree.NewDocument()
+	doc.SetRoot(req.Element())
+	reqBuf, err := doc.WriteToBytes()
+	if err != nil {
+		panic(err)
+	}
+	encodedReqBuf := base64.StdEncoding.EncodeToString(reqBuf)
+
+	tmpl := template.Must(template.New("saml-post-form").Parse(`` +
+		`<form method="post" action="{{.URL}}" id="SAMLRequestForm">` +
+		`<input type="hidden" name="SAMLRequest" value="{{.SAMLRequest}}" />` +
+		`<input type="hidden" name="RelayState" value="{{.RelayState}}" />` +
+		`<input id="SAMLSubmitButton" type="submit" value="Submit" />` +
+		`</form>` +
+		`<script>document.getElementById('SAMLSubmitButton').style.visibility="hidden";` +
+		`document.getElementById('SAMLRequestForm').submit();</script>`))
+	data := struct {
+		URL         string
+		SAMLRequest string
+		RelayState  string
+	}{
+		URL:         req.Destination,
+		SAMLRequest: encodedReqBuf,
+		RelayState:  relayState,
+	}
+
+	rv := bytes.Buffer{}
+	if err := tmpl.Execute(&rv, data); err != nil {
+		panic(err)
+	}
+
+	return rv.Bytes()
+}
+
+// AssertionAttributes is a list of AssertionAttribute
+type AssertionAttributes []AssertionAttribute
+
+// Get returns the assertion attribute whose Name or FriendlyName
+// matches name, or nil if no matching attribute is found.
+func (aa AssertionAttributes) Get(name string) *AssertionAttribute {
+	for _, attr := range aa {
+		if attr.Name == name {
+			return &attr
+		}
+		if attr.FriendlyName == name {
+			return &attr
+		}
+	}
+	return nil
+}
+
+// AssertionAttribute represents an attribute of the user extracted from
+// a SAML Assertion.
+type AssertionAttribute struct {
+	FriendlyName string
+	Name         string
+	Value        string
+}
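+
+// A small hedged example (the attribute name, OID and value are illustrative,
+// not defined by this package) of how Get resolves either the FriendlyName or
+// the formal Name:
+//
+//	attrs := AssertionAttributes{
+//		{FriendlyName: "mail", Name: "urn:oid:0.9.2342.19200300.100.1.3", Value: "user@example.com"},
+//	}
+//	attrs.Get("mail").Value                              // "user@example.com"
+//	attrs.Get("urn:oid:0.9.2342.19200300.100.1.3").Value // "user@example.com"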
+
+// InvalidResponseError is the error produced by ParseResponse when it fails.
+// The underlying error is in PrivateErr. Response is the response as it was
+// known at the time validation failed. Now is the time that was used to validate
+// time-dependent parts of the assertion.
+type InvalidResponseError struct {
+	PrivateErr error
+	Response   string
+	Now        time.Time
+}
+
+func (ivr *InvalidResponseError) Error() string {
+	return "Authentication failed"
+}
+
+// ParseResponse extracts the SAML IDP response received in req, validates
+// it, and returns the verified Assertion.
+//
+// This function handles decrypting the message, verifying the digital
+// signature on the assertion, and verifying that the specified conditions
+// and properties are met.
+//
+// If the function fails it will return an InvalidResponseError whose
+// properties are useful in describing which part of the parsing process
+// failed. However, to discourage inadvertent disclosure of diagnostic
+// information, the Error() method returns a static string.
+func (sp *ServiceProvider) ParseResponse(req *http.Request, possibleRequestIDs []string) (*Assertion, error) {
+	now := TimeNow()
+	retErr := &InvalidResponseError{
+		Now:      now,
+		Response: req.PostForm.Get("SAMLResponse"),
+	}
+
+	rawResponseBuf, err := base64.StdEncoding.DecodeString(req.PostForm.Get("SAMLResponse"))
+	if err != nil {
+		retErr.PrivateErr = fmt.Errorf("cannot parse base64: %s", err)
+		return nil, retErr
+	}
+	retErr.Response = string(rawResponseBuf)
+
+	// do some validation first before we decrypt
+	resp := Response{}
+	if err := xml.Unmarshal(rawResponseBuf, &resp); err != nil {
+		retErr.PrivateErr = fmt.Errorf("cannot unmarshal response: %s", err)
+		return nil, retErr
+	}
+	if resp.Destination != sp.AcsURL.String() {
+		retErr.PrivateErr = fmt.Errorf("`Destination` does not match AcsURL (expected %q)", sp.AcsURL.String())
+		return nil, retErr
+	}
+
+	requestIDvalid := false
+	for _, possibleRequestID := range possibleRequestIDs {
+		if resp.InResponseTo == possibleRequestID {
+			requestIDvalid = true
+		}
+	}
+	if !requestIDvalid {
+		retErr.PrivateErr = fmt.Errorf("`InResponseTo` does not match any of the possible request IDs (expected %v)", possibleRequestIDs)
+		return nil, retErr
+	}
+
+	if resp.IssueInstant.Add(MaxIssueDelay).Before(now) {
+		retErr.PrivateErr = fmt.Errorf("IssueInstant expired at %s", resp.IssueInstant.Add(MaxIssueDelay))
+		return nil, retErr
+	}
+	if resp.Issuer.Value != sp.IDPMetadata.EntityID {
+		retErr.PrivateErr = fmt.Errorf("Issuer does not match the IDP metadata (expected %q)", sp.IDPMetadata.EntityID)
+		return nil, retErr
+	}
+	if resp.Status.StatusCode.Value != StatusSuccess {
+		retErr.PrivateErr = fmt.Errorf("Status code was not %s", StatusSuccess)
+		return nil, retErr
+	}
+
+	var assertion *Assertion
+	if resp.EncryptedAssertion == nil {
+
+		doc := etree.NewDocument()
+		if err := doc.ReadFromBytes(rawResponseBuf); err != nil {
+			retErr.PrivateErr = err
+			return nil, retErr
+		}
+
+		// TODO(ross): verify that the namespace is urn:oasis:names:tc:SAML:2.0:protocol
+		responseEl := doc.Root()
+		if responseEl.Tag != "Response" {
+			retErr.PrivateErr = fmt.Errorf("expected to find a response object, not %s", doc.Root().Tag)
+			return nil, retErr
+		}
+
+		if err = sp.validateSigned(responseEl); err != nil {
+			retErr.PrivateErr = err
+			return nil, retErr
+		}
+
+		assertion = resp.Assertion
+	}
+
+	// decrypt the response
+	if resp.EncryptedAssertion != nil {
+		doc := etree.NewDocument()
+		if err := doc.ReadFromBytes(rawResponseBuf); err != nil {
+			retErr.PrivateErr = err
+			return nil, retErr
+		}
+		el := doc.FindElement("//EncryptedAssertion/EncryptedData")
+		plaintextAssertion, err := xmlenc.Decrypt(sp.Key, el)
+		if err != nil {
+			retErr.PrivateErr = fmt.Errorf("failed to decrypt response: %s", err)
+			return nil, retErr
+		}
+		retErr.Response = string(plaintextAssertion)
+
+		doc = etree.NewDocument()
+		if err := doc.ReadFromBytes(plaintextAssertion); err != nil {
+			retErr.PrivateErr = fmt.Errorf("cannot parse plaintext response %v", err)
+			return nil, retErr
+		}
+
+		if err := sp.validateSigned(doc.Root()); err != nil {
+			retErr.PrivateErr = err
+			return nil, retErr
+		}
+
+		assertion = &Assertion{}
+		if err := xml.Unmarshal(plaintextAssertion, assertion); err != nil {
+			retErr.PrivateErr = err
+			return nil, retErr
+		}
+	}
+
+	if err := sp.validateAssertion(assertion, possibleRequestIDs, now); err != nil {
+		retErr.PrivateErr = fmt.Errorf("assertion invalid: %s", err)
+		return nil, retErr
+	}
+
+	return assertion, nil
+}
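+
+// A minimal sketch of an assertion consumer service handler driving
+// ParseResponse. The way possibleRequestIDs is tracked and the logging shown
+// are assumptions, not something this file defines:
+//
+//	func acsHandler(sp *ServiceProvider, possibleRequestIDs []string) http.HandlerFunc {
+//		return func(w http.ResponseWriter, r *http.Request) {
+//			r.ParseForm() // ParseResponse reads r.PostForm
+//			assertion, err := sp.ParseResponse(r, possibleRequestIDs)
+//			if err != nil {
+//				if ire, ok := err.(*InvalidResponseError); ok {
+//					log.Printf("SAML response rejected: %v", ire.PrivateErr)
+//				}
+//				http.Error(w, "Forbidden", http.StatusForbidden)
+//				return
+//			}
+//			// Establish the application session from the assertion here.
+//			_ = assertion
+//		}
+//	}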
+
+// validateAssertion checks that the conditions specified in assertion match
+// the requirements for accepting it. If validation fails, it returns an error describing
+// the failure. (The digital signature on the assertion is not checked -- this
+// should be done before calling this function).
+func (sp *ServiceProvider) validateAssertion(assertion *Assertion, possibleRequestIDs []string, now time.Time) error {
+	if assertion.IssueInstant.Add(MaxIssueDelay).Before(now) {
+		return fmt.Errorf("expired on %s", assertion.IssueInstant.Add(MaxIssueDelay))
+	}
+	if assertion.Issuer.Value != sp.IDPMetadata.EntityID {
+		return fmt.Errorf("issuer is not %q", sp.IDPMetadata.EntityID)
+	}
+	for _, subjectConfirmation := range assertion.Subject.SubjectConfirmations {
+		requestIDvalid := false
+		for _, possibleRequestID := range possibleRequestIDs {
+			if subjectConfirmation.SubjectConfirmationData.InResponseTo == possibleRequestID {
+				requestIDvalid = true
+				break
+			}
+		}
+		if !requestIDvalid {
+			return fmt.Errorf("SubjectConfirmation InResponseTo does not match any of the possible request IDs (%v)", possibleRequestIDs)
+		}
+		if subjectConfirmation.SubjectConfirmationData.Recipient != sp.AcsURL.String() {
+			return fmt.Errorf("SubjectConfirmation Recipient is not %s", sp.AcsURL.String())
+		}
+		if subjectConfirmation.SubjectConfirmationData.NotOnOrAfter.Add(MaxClockSkew).Before(now) {
+			return fmt.Errorf("SubjectConfirmationData is expired")
+		}
+	}
+	if assertion.Conditions.NotBefore.Add(-MaxClockSkew).After(now) {
+		return fmt.Errorf("Conditions is not yet valid")
+	}
+	if assertion.Conditions.NotOnOrAfter.Add(MaxClockSkew).Before(now) {
+		return fmt.Errorf("Conditions is expired")
+	}
+
+	audienceRestrictionsValid := false
+	for _, audienceRestriction := range assertion.Conditions.AudienceRestrictions {
+		if audienceRestriction.Audience.Value == sp.MetadataURL.String() {
+			audienceRestrictionsValid = true
+		}
+	}
+	if !audienceRestrictionsValid {
+		return fmt.Errorf("Conditions AudienceRestriction does not contain %q", sp.MetadataURL.String())
+	}
+	return nil
+}
+
+func findChild(parentEl *etree.Element, childNS string, childTag string) (*etree.Element, error) {
+	for _, childEl := range parentEl.ChildElements() {
+		if childEl.Tag != childTag {
+			continue
+		}
+
+		ctx, err := etreeutils.NSBuildParentContext(childEl)
+		if err != nil {
+			return nil, err
+		}
+		ctx, err = ctx.SubContext(childEl)
+		if err != nil {
+			return nil, err
+		}
+
+		ns, err := ctx.LookupPrefix(childEl.Space)
+		if err != nil {
+			return nil, fmt.Errorf("[%s]:%s cannot find prefix %s: %v", childNS, childTag, childEl.Space, err)
+		}
+		if ns != childNS {
+			continue
+		}
+
+		return childEl, nil
+	}
+	return nil, nil
+}
+
+// validateSigned returns a nil error iff each of the signatures on the Response and Assertion elements
+// is valid and there is at least one signature.
+func (sp *ServiceProvider) validateSigned(responseEl *etree.Element) error {
+	haveSignature := false
+
+	// Some SAML responses have the signature on the Response object, and some on the Assertion
+	// object, and some on both. We will require that at least one signature be present and that
+	// all signatures be valid
+	sigEl, err := findChild(responseEl, "http://www.w3.org/2000/09/xmldsig#", "Signature")
+	if err != nil {
+		return err
+	}
+	if sigEl != nil {
+		if err = sp.validateSignature(responseEl); err != nil {
+			return fmt.Errorf("cannot validate signature on Response: %v", err)
+		}
+		haveSignature = true
+	}
+
+	assertionEl, err := findChild(responseEl, "urn:oasis:names:tc:SAML:2.0:assertion", "Assertion")
+	if err != nil {
+		return err
+	}
+	if assertionEl != nil {
+		sigEl, err := findChild(assertionEl, "http://www.w3.org/2000/09/xmldsig#", "Signature")
+		if err != nil {
+			return err
+		}
+		if sigEl != nil {
+			if err = sp.validateSignature(assertionEl); err != nil {
+				return fmt.Errorf("cannot validate signature on Assertion: %v", err)
+			}
+			haveSignature = true
+		}
+	}
+
+	if !haveSignature {
+		return errors.New("either the Response or Assertion must be signed")
+	}
+	return nil
+}
+
+// validateSignature returns nil iff the Signature embedded in the element is valid
+func (sp *ServiceProvider) validateSignature(el *etree.Element) error {
+	cert, err := sp.getIDPSigningCert()
+	if err != nil {
+		return err
+	}
+
+	certificateStore := dsig.MemoryX509CertificateStore{
+		Roots: []*x509.Certificate{cert},
+	}
+
+	validationContext := dsig.NewDefaultValidationContext(&certificateStore)
+	validationContext.IdAttribute = "ID"
+	if Clock != nil {
+		validationContext.Clock = Clock
+	}
+
+	// Some SAML responses contain an RSAKeyValue element. One of two things is happening here:
+	//
+	// (1) We're getting something signed by a key we already know about -- the public key
+	//     of the signing cert provided in the metadata.
+	// (2) We're getting something signed by a key we *don't* know about, and which we have
+	//     no ability to verify.
+	//
+	// The best course of action is to just remove the KeyInfo so that dsig falls back to
+	// verifying against the public key provided in the metadata.
+	if el.FindElement("./Signature/KeyInfo/X509Data/X509Certificate") == nil {
+		if sigEl := el.FindElement("./Signature"); sigEl != nil {
+			if keyInfo := sigEl.FindElement("KeyInfo"); keyInfo != nil {
+				sigEl.RemoveChild(keyInfo)
+			}
+		}
+	}
+
+	ctx, err := etreeutils.NSBuildParentContext(el)
+	if err != nil {
+		return err
+	}
+	ctx, err = ctx.SubContext(el)
+	if err != nil {
+		return err
+	}
+	el, err = etreeutils.NSDetatch(ctx, el)
+	if err != nil {
+		return err
+	}
+
+	_, err = validationContext.Validate(el)
+	return err
+}
diff --git a/vendor/github.com/crewjam/saml/time.go b/vendor/github.com/crewjam/saml/time.go
new file mode 100644
index 0000000000000000000000000000000000000000..00ce754c8d520e0ff946e4f48f0cbe30a2331832
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/time.go
@@ -0,0 +1,48 @@
+package saml
+
+import "time"
+
+type RelaxedTime time.Time
+
+const timeFormat = "2006-01-02T15:04:05.999Z07:00"
+
+func (m RelaxedTime) MarshalText() ([]byte, error) {
+	// According to section 1.2.2 of the OASIS SAML 1.1 spec, we can't trust
+	// other applications to handle time resolution finer than a millisecond.
+	//
+	// The time MUST be expressed in UTC.
+	return []byte(m.String()), nil
+}
+
+func (m RelaxedTime) String() string {
+	return time.Time(m).Round(time.Millisecond).UTC().Format(timeFormat)
+}
+
+func (m *RelaxedTime) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		*m = RelaxedTime(time.Time{})
+		return nil
+	}
+	t, err1 := time.Parse(time.RFC3339, string(text))
+	if err1 == nil {
+		t = t.Round(time.Millisecond)
+		*m = RelaxedTime(t)
+		return nil
+	}
+
+	t, err2 := time.Parse(time.RFC3339Nano, string(text))
+	if err2 == nil {
+		t = t.Round(time.Millisecond)
+		*m = RelaxedTime(t)
+		return nil
+	}
+
+	t, err2 = time.Parse("2006-01-02T15:04:05.999999999", string(text))
+	if err2 == nil {
+		t = t.Round(time.Millisecond)
+		*m = RelaxedTime(t)
+		return nil
+	}
+
+	return err1
+}
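+
+// For illustration (the timestamps are arbitrary), marshalling rounds to
+// millisecond precision and normalises to UTC, while unmarshalling accepts
+// RFC 3339 input with or without a zone:
+//
+//	t, _ := time.Parse(time.RFC3339Nano, "2019-01-02T03:04:05.123456789+01:00")
+//	RelaxedTime(t).String() // "2019-01-02T02:04:05.123Z"
+//
+//	var rt RelaxedTime
+//	_ = rt.UnmarshalText([]byte("2019-01-02T03:04:05Z")) // parses successfully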
diff --git a/vendor/github.com/crewjam/saml/util.go b/vendor/github.com/crewjam/saml/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c5e2b240c90d2be7be0e045a5f9ef6c1811ec80
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/util.go
@@ -0,0 +1,29 @@
+package saml
+
+import (
+	"crypto/rand"
+	"time"
+
+	dsig "github.com/russellhaering/goxmldsig"
+)
+
+// TimeNow is a function that returns the current time. The default
+// value is time.Now, but it can be replaced for testing.
+var TimeNow = func() time.Time { return time.Now().UTC() }
+
+// Clock is assigned to dsig validation and signing contexts if it is
+// not nil, otherwise the default clock is used.
+var Clock *dsig.Clock
+
+// RandReader is the io.Reader that produces cryptographically random
+// bytes when they are needed by the library. The default value is
+// rand.Reader, but it can be replaced for testing.
+var RandReader = rand.Reader
+
+func randomBytes(n int) []byte {
+	rv := make([]byte, n)
+	if _, err := RandReader.Read(rv); err != nil {
+		panic(err)
+	}
+	return rv
+}
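+
+// A test-only sketch (the fixed date and the math/rand seed are arbitrary
+// choices, not defaults of this package) showing how the hooks above can be
+// pinned for deterministic output:
+//
+//	TimeNow = func() time.Time { return time.Date(2019, 1, 2, 3, 4, 5, 0, time.UTC) }
+//	RandReader = mathrand.New(mathrand.NewSource(0)) // mathrand = "math/rand"; never use in production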
diff --git a/vendor/github.com/crewjam/saml/xmlenc/cbc.go b/vendor/github.com/crewjam/saml/xmlenc/cbc.go
new file mode 100644
index 0000000000000000000000000000000000000000..11ee210d7e93c034259a900c563ef577ade6d57e
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/xmlenc/cbc.go
@@ -0,0 +1,187 @@
+package xmlenc
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/des" // nolint: gas
+	"encoding/base64"
+	"errors"
+	"fmt"
+
+	"github.com/beevik/etree"
+)
+
+// CBC implements Decrypter and Encrypter for block ciphers in CBC mode
+type CBC struct {
+	keySize   int
+	algorithm string
+	cipher    func([]byte) (cipher.Block, error)
+}
+
+// KeySize returns the length of the key required.
+func (e CBC) KeySize() int {
+	return e.keySize
+}
+
+// Algorithm returns the name of the algorithm, as will be found
+// in an xenc:EncryptionMethod element.
+func (e CBC) Algorithm() string {
+	return e.algorithm
+}
+
+// Encrypt encrypts plaintext with key, which should be a []byte of length KeySize().
+// It returns an xenc:EncryptedData element.
+func (e CBC) Encrypt(key interface{}, plaintext []byte) (*etree.Element, error) {
+	keyBuf, ok := key.([]byte)
+	if !ok {
+		return nil, ErrIncorrectKeyType("[]byte")
+	}
+	if len(keyBuf) != e.keySize {
+		return nil, ErrIncorrectKeyLength(e.keySize)
+	}
+
+	block, err := e.cipher(keyBuf)
+	if err != nil {
+		return nil, err
+	}
+
+	encryptedDataEl := etree.NewElement("xenc:EncryptedData")
+	encryptedDataEl.CreateAttr("xmlns:xenc", "http://www.w3.org/2001/04/xmlenc#")
+	{
+		randBuf := make([]byte, 16)
+		if _, err := RandReader.Read(randBuf); err != nil {
+			return nil, err
+		}
+		encryptedDataEl.CreateAttr("Id", fmt.Sprintf("_%x", randBuf))
+	}
+
+	em := encryptedDataEl.CreateElement("xenc:EncryptionMethod")
+	em.CreateAttr("Algorithm", e.algorithm)
+	em.CreateAttr("xmlns:xenc", "http://www.w3.org/2001/04/xmlenc#")
+
+	plaintext = appendPadding(plaintext, block.BlockSize())
+
+	iv := make([]byte, block.BlockSize())
+	if _, err := RandReader.Read(iv); err != nil {
+		return nil, err
+	}
+
+	mode := cipher.NewCBCEncrypter(block, iv)
+	ciphertext := make([]byte, len(plaintext))
+	mode.CryptBlocks(ciphertext, plaintext)
+	ciphertext = append(iv, ciphertext...)
+
+	cd := encryptedDataEl.CreateElement("xenc:CipherData")
+	cd.CreateAttr("xmlns:xenc", "http://www.w3.org/2001/04/xmlenc#")
+	cd.CreateElement("xenc:CipherValue").SetText(base64.StdEncoding.EncodeToString(ciphertext))
+	return encryptedDataEl, nil
+}
+
+// Decrypt decrypts an encrypted element with key. If the ciphertext contains an
+// EncryptedKey element, then the type of `key` is determined by the registered
+// Decrypter for the EncryptedKey element. Otherwise, `key` must be a []byte of
+// length KeySize().
+func (e CBC) Decrypt(key interface{}, ciphertextEl *etree.Element) ([]byte, error) {
+	// If the key is encrypted, decrypt it.
+	if encryptedKeyEl := ciphertextEl.FindElement("./KeyInfo/EncryptedKey"); encryptedKeyEl != nil {
+		var err error
+		key, err = Decrypt(key, encryptedKeyEl)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keyBuf, ok := key.([]byte)
+	if !ok {
+		return nil, ErrIncorrectKeyType("[]byte")
+	}
+	if len(keyBuf) != e.KeySize() {
+		return nil, ErrIncorrectKeyLength(e.KeySize())
+	}
+
+	block, err := e.cipher(keyBuf)
+	if err != nil {
+		return nil, err
+	}
+
+	ciphertext, err := getCiphertext(ciphertextEl)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(ciphertext) < block.BlockSize() {
+		return nil, errors.New("ciphertext too short")
+	}
+
+	iv := ciphertext[:block.BlockSize()]
+	ciphertext = ciphertext[block.BlockSize():]
+
+	mode := cipher.NewCBCDecrypter(block, iv)
+	plaintext := make([]byte, len(ciphertext))
+	mode.CryptBlocks(plaintext, ciphertext) // decrypt ciphertext into plaintext
+
+	plaintext, err = stripPadding(plaintext)
+	if err != nil {
+		return nil, err
+	}
+
+	return plaintext, nil
+}
+
+var (
+	// AES128CBC implements AES128-CBC symmetric key mode for encryption and decryption
+	AES128CBC BlockCipher = CBC{
+		keySize:   16,
+		algorithm: "http://www.w3.org/2001/04/xmlenc#aes128-cbc",
+		cipher:    aes.NewCipher,
+	}
+
+	// AES192CBC implements AES192-CBC symmetric key mode for encryption and decryption
+	AES192CBC BlockCipher = CBC{
+		keySize:   24,
+		algorithm: "http://www.w3.org/2001/04/xmlenc#aes192-cbc",
+		cipher:    aes.NewCipher,
+	}
+
+	// AES256CBC implements AES256-CBC symmetric key mode for encryption and decryption
+	AES256CBC BlockCipher = CBC{
+		keySize:   32,
+		algorithm: "http://www.w3.org/2001/04/xmlenc#aes256-cbc",
+		cipher:    aes.NewCipher,
+	}
+
+	// TripleDES implements 3DES in CBC mode for encryption and decryption
+	TripleDES BlockCipher = CBC{
+		keySize:   8,
+		algorithm: "http://www.w3.org/2001/04/xmlenc#tripledes-cbc",
+		cipher:    des.NewCipher,
+	}
+)
+
+func init() {
+	RegisterDecrypter(AES128CBC)
+	RegisterDecrypter(AES192CBC)
+	RegisterDecrypter(AES256CBC)
+	RegisterDecrypter(TripleDES)
+}
+
+func appendPadding(buf []byte, blockSize int) []byte {
+	paddingBytes := blockSize - (len(buf) % blockSize)
+	padding := make([]byte, paddingBytes)
+	padding[len(padding)-1] = byte(paddingBytes)
+	return append(buf, padding...)
+}
+
+func stripPadding(buf []byte) ([]byte, error) {
+	if len(buf) < 1 {
+		return nil, errors.New("buffer is too short for padding")
+	}
+	paddingBytes := int(buf[len(buf)-1])
+	if paddingBytes > len(buf)-1 {
+		return nil, errors.New("buffer is too short for padding")
+	}
+	if paddingBytes < 1 {
+		return nil, errors.New("padding must be at least one byte")
+	}
+	return buf[:len(buf)-paddingBytes], nil
+}
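+
+// A worked example of the padding scheme (block size and lengths chosen for
+// illustration): only the final byte records the padding length, the rest of
+// the padding is zero.
+//
+//	padded := appendPadding(make([]byte, 14), 16) // 16 bytes; the appended pad is 0x00 0x02
+//	buf, _ := stripPadding(padded)                // buf holds the original 14 bytes
+//	appendPadding(make([]byte, 16), 16)           // 32 bytes; a full extra block ending in 0x10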
diff --git a/vendor/github.com/crewjam/saml/xmlenc/decrypt.go b/vendor/github.com/crewjam/saml/xmlenc/decrypt.go
new file mode 100644
index 0000000000000000000000000000000000000000..f7d720239bccd41615972a4eb69bcfa943a16403
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/xmlenc/decrypt.go
@@ -0,0 +1,117 @@
+package xmlenc
+
+import (
+	// nolint: gas
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/beevik/etree"
+)
+
+// ErrAlgorithmNotImplemented is returned when the encryption algorithm in
+// use is not supported.
+type ErrAlgorithmNotImplemented string
+
+func (e ErrAlgorithmNotImplemented) Error() string {
+	return "algorithm is not implemented: " + string(e)
+}
+
+// ErrCannotFindRequiredElement is returned by Decrypt when a required
+// element cannot be found.
+type ErrCannotFindRequiredElement string
+
+func (e ErrCannotFindRequiredElement) Error() string {
+	return "cannot find required element: " + string(e)
+}
+
+// ErrIncorrectTag is returned when Decrypt is passed an element which
+// is neither an EncryptedType nor an EncryptedKey
+var ErrIncorrectTag = fmt.Errorf("tag must be an EncryptedType or EncryptedKey")
+
+// ErrIncorrectKeyLength is returned when the fixed length key is not
+// of the required length.
+type ErrIncorrectKeyLength int
+
+func (e ErrIncorrectKeyLength) Error() string {
+	return fmt.Sprintf("expected key to be %d bytes", int(e))
+}
+
+// ErrIncorrectKeyType is returned when the key is not the correct type
+type ErrIncorrectKeyType string
+
+func (e ErrIncorrectKeyType) Error() string {
+	return fmt.Sprintf("expected key to be %s", string(e))
+}
+
+// Decrypt decrypts the encrypted data using the provided key. If the
+// data are encrypted using AES or 3DES, then the key should be a []byte.
+// If the data are encrypted with PKCS1v15 or RSA-OAEP-MGF1P then key should
+// be a *rsa.PrivateKey.
+func Decrypt(key interface{}, ciphertextEl *etree.Element) ([]byte, error) {
+	encryptionMethodEl := ciphertextEl.FindElement("./EncryptionMethod")
+	if encryptionMethodEl == nil {
+		return nil, ErrCannotFindRequiredElement("EncryptionMethod")
+	}
+	algorithm := encryptionMethodEl.SelectAttrValue("Algorithm", "")
+	decrypter, ok := decrypters[algorithm]
+	if !ok {
+		return nil, ErrAlgorithmNotImplemented(algorithm)
+	}
+	return decrypter.Decrypt(key, ciphertextEl)
+}
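+
+// A hedged usage sketch (variable names are illustrative): given the
+// xenc:EncryptedData element of an encrypted SAML assertion and the
+// recipient's RSA private key, decryption is a single call:
+//
+//	encryptedDataEl := doc.FindElement("//EncryptedAssertion/EncryptedData")
+//	plaintext, err := Decrypt(rsaPrivateKey, encryptedDataEl)
+//	if err != nil {
+//		// handle the error
+//	}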
+
+func getCiphertext(encryptedKey *etree.Element) ([]byte, error) {
+	ciphertextEl := encryptedKey.FindElement("./CipherData/CipherValue")
+	if ciphertextEl == nil {
+		return nil, fmt.Errorf("cannot find CipherData element containing a CipherValue element")
+	}
+	ciphertext, err := base64.StdEncoding.DecodeString(strings.TrimSpace(ciphertextEl.Text()))
+	if err != nil {
+		return nil, err
+	}
+	return ciphertext, nil
+}
+
+func validateRSAKey(key interface{}, encryptedKey *etree.Element) (*rsa.PrivateKey, error) {
+	rsaKey, ok := key.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("expected key to be a *rsa.PrivateKey")
+	}
+
+	// Extract and verify that the public key matches the certificate.
+	// This section is included to either let the service know up front
+	// if the key will work, or to let the service provider know which key
+	// to use to decrypt the message. Either way, verification is not
+	// security-critical.
+	if el := encryptedKey.FindElement("./KeyInfo/X509Data/X509Certificate"); el != nil {
+		certPEMbuf := el.Text()
+		certPEMbuf = "-----BEGIN CERTIFICATE-----\n" + certPEMbuf + "\n-----END CERTIFICATE-----\n"
+		certPEM, _ := pem.Decode([]byte(certPEMbuf))
+		if certPEM == nil {
+			return nil, fmt.Errorf("invalid certificate")
+		}
+		cert, err := x509.ParseCertificate(certPEM.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		pubKey, ok := cert.PublicKey.(*rsa.PublicKey)
+		if !ok {
+			return nil, fmt.Errorf("expected certificate to be an *rsa.PublicKey")
+		}
+		if rsaKey.N.Cmp(pubKey.N) != 0 || rsaKey.E != pubKey.E {
+			return nil, fmt.Errorf("certificate does not match provided key")
+		}
+	} else if el = encryptedKey.FindElement("./KeyInfo/X509Data/X509IssuerSerial"); el != nil {
+		// TODO: determine how to validate the issuer serial information
+	} else {
+		return nil, ErrCannotFindRequiredElement("X509Certificate or X509IssuerSerial")
+	}
+	return rsaKey, nil
+}
diff --git a/vendor/github.com/crewjam/saml/xmlenc/digest.go b/vendor/github.com/crewjam/saml/xmlenc/digest.go
new file mode 100644
index 0000000000000000000000000000000000000000..89f10226574d5947f627c0ca7eabe1aea3d4a78f
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/xmlenc/digest.go
@@ -0,0 +1,56 @@
+package xmlenc
+
+import (
+	"crypto/sha1"
+	"crypto/sha256"
+	"crypto/sha512"
+	"hash"
+
+	"golang.org/x/crypto/ripemd160"
+)
+
+type digestMethod struct {
+	algorithm string
+	hash      func() hash.Hash
+}
+
+func (dm digestMethod) Algorithm() string {
+	return dm.algorithm
+}
+
+func (dm digestMethod) Hash() hash.Hash {
+	return dm.hash()
+}
+
+var (
+	// SHA1 implements the SHA-1 digest method (which is considered insecure)
+	SHA1 = digestMethod{
+		algorithm: "http://www.w3.org/2000/09/xmldsig#sha1",
+		hash:      sha1.New,
+	}
+
+	// SHA256 implements the SHA-256 digest method
+	SHA256 = digestMethod{
+		algorithm: "http://www.w3.org/2000/09/xmldsig#sha256",
+		hash:      sha256.New,
+	}
+
+	// SHA512 implements the SHA-512 digest method
+	SHA512 = digestMethod{
+		algorithm: "http://www.w3.org/2000/09/xmldsig#sha512",
+		hash:      sha512.New,
+	}
+
+	// RIPEMD160 implements the RIPEMD160 digest method
+	RIPEMD160 = digestMethod{
+		algorithm: "http://www.w3.org/2000/09/xmldsig#ripemd160",
+		hash:      ripemd160.New,
+	}
+)
+
+func init() {
+	RegisterDigestMethod(SHA1)
+	RegisterDigestMethod(SHA256)
+	RegisterDigestMethod(SHA512)
+	RegisterDigestMethod(RIPEMD160)
+}
diff --git a/vendor/github.com/crewjam/saml/xmlenc/fuzz.go b/vendor/github.com/crewjam/saml/xmlenc/fuzz.go
new file mode 100644
index 0000000000000000000000000000000000000000..c035d65f7609d49c4a24a00040ca2691518bb8db
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/xmlenc/fuzz.go
@@ -0,0 +1,49 @@
+package xmlenc
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+
+	"github.com/beevik/etree"
+)
+
+var testKey = func() *rsa.PrivateKey {
+	const keyStr = `-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDkXTUsWzRVpUHjbDpWCfYDfXmQ/q4LkaioZoTpu4ut1Q3eQC5t
+gD14agJhgT8yzeY5S/YNlwCyuVkjuFyoyTHFX2IOPpz7jnh4KnQ+B1IH9fY/+kmk
+zHJgxSUDJsdUMPgGpKt5hnEn7ziXAWXLc2udFbnHwhi9TXXwRHGi9wZ4YwIDAQAB
+AoGBALNTnlXeqRI4W61DZ+v4ln/XIIeD9xiOoWrcVrNU2zL+g41ryQmkEqFkXcpD
+vGUg2xFTXTz+v0WZ1y39sIW6uKFRYUfaNsF6iVfGAyx1VWK/jgtPnCWDQy26Eby0
+BqpbZRy1a6MLYVEG/5bvZE01CDV4XttpTrNX91WWcYGduJxBAkEA6ED1ZOqIzBpu
+c2KAo+bWmroCH8+cSDk0gVq6bnRB+EEhRCmo/VgvndWLxfexdGmDIOAIisB06N5a
+GzBSCaEY/QJBAPu2cNvuuBNLwrlxPCwOEpIHYT4gJq8UMtg6O6N+u++nYCGhK6uo
+VCmrKY+UewyNIcsLZF0jsNI2qJjiU1vQxN8CQQDfQJnigMQwlfO3/Ga1po6Buu2R
+0IpkroB3G1R8GkrTrR+iGv2zUdKrwHsUOC2fPlFrB4+OeMOomRw6aG9jjDStAkB1
+ztiZhuvuVAoKIv5HnDqC0CNqIUAZtzlozDB3f+xT6SFr+/Plfn4Nlod4JMVGhZNo
+ZaeOlBLBAEX+cAcVtOs/AkBicZOAPv84ABmFfyhXhYaAuacaJLq//jg+t+URUOg+
+XZS9naRmawEQxOkZQVoMeKgvu05+V4MniFqdQBINIkr5
+-----END RSA PRIVATE KEY-----`
+	b, _ := pem.Decode([]byte(keyStr))
+	k, err := x509.ParsePKCS1PrivateKey(b.Bytes)
+	if err != nil {
+		panic(err)
+	}
+	return k
+}()
+
+// Fuzz is the go-fuzz fuzzing function
+func Fuzz(data []byte) int {
+	doc := etree.NewDocument()
+	if err := doc.ReadFromBytes(data); err != nil {
+		return 0
+	}
+	if doc.Root() == nil {
+		return 0
+	}
+
+	if _, err := Decrypt(testKey, doc.Root()); err != nil {
+		return 0
+	}
+	return 1
+}
diff --git a/vendor/github.com/crewjam/saml/xmlenc/pubkey.go b/vendor/github.com/crewjam/saml/xmlenc/pubkey.go
new file mode 100644
index 0000000000000000000000000000000000000000..7ccdd6c65aaff66a0785dd5f93a9705f2e8c5685
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/xmlenc/pubkey.go
@@ -0,0 +1,161 @@
+package xmlenc
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"fmt"
+
+	"github.com/beevik/etree"
+)
+
+// RSA implements Encrypter and Decrypter using RSA public key encryption.
+//
+// Use a function like OAEP() or PKCS1v15() to get an instance of this type
+// ready to use.
+type RSA struct {
+	BlockCipher  BlockCipher
+	DigestMethod DigestMethod // only for OAEP
+
+	algorithm    string
+	keyEncrypter func(e RSA, pubKey *rsa.PublicKey, plaintext []byte) ([]byte, error)
+	keyDecrypter func(e RSA, privKey *rsa.PrivateKey, ciphertext []byte) ([]byte, error)
+}
+
+// Algorithm returns the name of the algorithm
+func (e RSA) Algorithm() string {
+	return e.algorithm
+}
+
+// Encrypt implements Encrypter. certificate must be a *x509.Certificate
+// containing an RSA public key.
+func (e RSA) Encrypt(certificate interface{}, plaintext []byte) (*etree.Element, error) {
+	cert, ok := certificate.(*x509.Certificate)
+	if !ok {
+		return nil, ErrIncorrectKeyType("*x.509 certificate")
+	}
+
+	pubKey, ok := cert.PublicKey.(*rsa.PublicKey)
+	if !ok {
+		return nil, ErrIncorrectKeyType("x.509 certificate with an RSA public key")
+	}
+
+	// generate a key
+	key := make([]byte, e.BlockCipher.KeySize())
+	if _, err := RandReader.Read(key); err != nil {
+		return nil, err
+	}
+
+	keyInfoEl := etree.NewElement("ds:KeyInfo")
+	keyInfoEl.CreateAttr("xmlns:ds", "http://www.w3.org/2000/09/xmldsig#")
+
+	encryptedKey := keyInfoEl.CreateElement("xenc:EncryptedKey")
+	{
+		randBuf := make([]byte, 16)
+		if _, err := RandReader.Read(randBuf); err != nil {
+			return nil, err
+		}
+		encryptedKey.CreateAttr("Id", fmt.Sprintf("_%x", randBuf))
+	}
+	encryptedKey.CreateAttr("xmlns:xenc", "http://www.w3.org/2001/04/xmlenc#")
+
+	encryptionMethodEl := encryptedKey.CreateElement("xenc:EncryptionMethod")
+	encryptionMethodEl.CreateAttr("Algorithm", e.algorithm)
+	encryptionMethodEl.CreateAttr("xmlns:xenc", "http://www.w3.org/2001/04/xmlenc#")
+	if e.DigestMethod != nil {
+		dm := encryptionMethodEl.CreateElement("ds:DigestMethod")
+		dm.CreateAttr("Algorithm", e.DigestMethod.Algorithm())
+		dm.CreateAttr("xmlns:ds", "http://www.w3.org/2000/09/xmldsig#")
+	}
+	{
+		innerKeyInfoEl := encryptedKey.CreateElement("ds:KeyInfo")
+		x509data := innerKeyInfoEl.CreateElement("ds:X509Data")
+		x509data.CreateElement("ds:X509Certificate").SetText(
+			base64.StdEncoding.EncodeToString(cert.Raw),
+		)
+	}
+
+	buf, err := e.keyEncrypter(e, pubKey, key)
+	if err != nil {
+		return nil, err
+	}
+
+	cd := encryptedKey.CreateElement("xenc:CipherData")
+	cd.CreateAttr("xmlns:xenc", "http://www.w3.org/2001/04/xmlenc#")
+	cd.CreateElement("xenc:CipherValue").SetText(base64.StdEncoding.EncodeToString(buf))
+	encryptedDataEl, err := e.BlockCipher.Encrypt(key, plaintext)
+	if err != nil {
+		return nil, err
+	}
+	encryptedDataEl.InsertChild(encryptedDataEl.FindElement("./CipherData"), keyInfoEl)
+
+	return encryptedDataEl, nil
+}
+
+// Decrypt implements Decrypter. `key` must be an *rsa.PrivateKey.
+func (e RSA) Decrypt(key interface{}, ciphertextEl *etree.Element) ([]byte, error) {
+	rsaKey, err := validateRSAKey(key, ciphertextEl)
+	if err != nil {
+		return nil, err
+	}
+
+	ciphertext, err := getCiphertext(ciphertextEl)
+	if err != nil {
+		return nil, err
+	}
+
+	{
+		digestMethodEl := ciphertextEl.FindElement("./EncryptionMethod/DigestMethod")
+		if digestMethodEl == nil {
+			return nil, fmt.Errorf("cannot find required DigestMethod element")
+		}
+		hashAlgorithmStr := digestMethodEl.SelectAttrValue("Algorithm", "")
+		digestMethod, ok := digestMethods[hashAlgorithmStr]
+		if !ok {
+			return nil, ErrAlgorithmNotImplemented(hashAlgorithmStr)
+		}
+		e.DigestMethod = digestMethod
+	}
+
+	return e.keyDecrypter(e, rsaKey, ciphertext)
+}
+
+// OAEP returns a version of RSA that implements RSA in OAEP-MGF1P mode. By default
+// the block cipher used is AES-256 CBC and the digest method is SHA-256. You can
+// specify other ciphers and digest methods by assigning to BlockCipher or
+// DigestMethod.
+func OAEP() RSA {
+	return RSA{
+		BlockCipher:  AES256CBC,
+		DigestMethod: SHA256,
+		algorithm:    "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p",
+		keyEncrypter: func(e RSA, pubKey *rsa.PublicKey, plaintext []byte) ([]byte, error) {
+			return rsa.EncryptOAEP(e.DigestMethod.Hash(), RandReader, pubKey, plaintext, nil)
+		},
+		keyDecrypter: func(e RSA, privKey *rsa.PrivateKey, ciphertext []byte) ([]byte, error) {
+			return rsa.DecryptOAEP(e.DigestMethod.Hash(), RandReader, privKey, ciphertext, nil)
+		},
+	}
+}
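+
+// An illustrative sketch (recipientCert, a *x509.Certificate, and plaintextXML,
+// a []byte, are assumptions): encrypting with OAEP wraps a fresh AES-256-CBC
+// session key for the recipient and returns an xenc:EncryptedData element.
+//
+//	encryptedDataEl, err := OAEP().Encrypt(recipientCert, plaintextXML)
+//	if err != nil {
+//		// handle the error
+//	}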
+
+// PKCS1v15 returns a version of RSA that implements RSA in PKCS1v15 mode. By default
+// the block cipher used is AES-256 CBC. The DigestMethod field is ignored because PKCS1v15
+// does not use a digest function.
+func PKCS1v15() RSA {
+	return RSA{
+		BlockCipher:  AES256CBC,
+		DigestMethod: nil,
+		algorithm:    "http://www.w3.org/2001/04/xmlenc#rsa-1_5",
+		keyEncrypter: func(e RSA, pubKey *rsa.PublicKey, plaintext []byte) ([]byte, error) {
+			return rsa.EncryptPKCS1v15(RandReader, pubKey, plaintext)
+		},
+		keyDecrypter: func(e RSA, privKey *rsa.PrivateKey, ciphertext []byte) ([]byte, error) {
+			return rsa.DecryptPKCS1v15(RandReader, privKey, ciphertext)
+		},
+	}
+}
+
+func init() {
+	RegisterDecrypter(OAEP())
+	RegisterDecrypter(PKCS1v15())
+}
diff --git a/vendor/github.com/crewjam/saml/xmlenc/xmlenc.go b/vendor/github.com/crewjam/saml/xmlenc/xmlenc.go
new file mode 100644
index 0000000000000000000000000000000000000000..f7f94924944c1bda2d15bac4b052652b202cb407
--- /dev/null
+++ b/vendor/github.com/crewjam/saml/xmlenc/xmlenc.go
@@ -0,0 +1,63 @@
+// Package xmlenc is a partial implementation of the xmlenc standard
+// as described in https://www.w3.org/TR/2002/REC-xmlenc-core-20021210/Overview.html.
+// The purpose of this implementation is to support encrypted SAML assertions.
+package xmlenc
+
+import (
+	"crypto/rand"
+	"hash"
+	"io"
+
+	"github.com/beevik/etree"
+)
+
+// RandReader allows tests to replace the source of randomness used by
+// this package. By default it is Reader from crypto/rand.
+var RandReader io.Reader = rand.Reader
+
+// Encrypter is an interface that encrypts things. Given a plaintext it returns an
+// XML EncryptedData or EncryptedKey element. The required type of `key` varies
+// depending on the implementation.
+type Encrypter interface {
+	Encrypt(key interface{}, plaintext []byte) (*etree.Element, error)
+}
+
+// Decrypter is an interface that decrypts things. The Decrypt() method returns the
+// plaintext version of the EncryptedData or EncryptedKey element passed.
+//
+// You probably don't have to use this interface directly; instead you may call
+// Decrypt(), which examines the element to determine which Decrypter to use.
+type Decrypter interface {
+	Algorithm() string
+	Decrypt(key interface{}, ciphertextEl *etree.Element) ([]byte, error)
+}
+
+// DigestMethod represents a digest method such as SHA1, etc.
+type DigestMethod interface {
+	Algorithm() string
+	Hash() hash.Hash
+}
+
+var (
+	decrypters    = map[string]Decrypter{}
+	digestMethods = map[string]DigestMethod{}
+)
+
+// RegisterDecrypter registers the specified decrypter so that it can be
+// used with Decrypt().
+func RegisterDecrypter(d Decrypter) {
+	decrypters[d.Algorithm()] = d
+}
+
+// RegisterDigestMethod registers the specified digest method so that it can be
+// used with Decrypt().
+func RegisterDigestMethod(dm DigestMethod) {
+	digestMethods[dm.Algorithm()] = dm
+}
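+
+// As a hedged illustration (myGCMDecrypter is hypothetical, not part of this
+// package), an additional algorithm becomes usable through Decrypt() once it
+// is registered, typically from an init function:
+//
+//	func init() {
+//		RegisterDecrypter(myGCMDecrypter) // must implement Decrypter
+//	}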
+
+// BlockCipher implements a cipher with a fixed size key like AES or 3DES.
+type BlockCipher interface {
+	Encrypter
+	Decrypter
+	KeySize() int
+}
diff --git a/vendor/github.com/jonboulle/clockwork/LICENSE b/vendor/github.com/jonboulle/clockwork/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..5c304d1a4a7b439f767990bf1360d3283e45d0ee
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..47bc136b0c08eff35ffc33c1287861ceebeb7289
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/README.md
@@ -0,0 +1,69 @@
+clockwork
+=========
+
+[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork)
+[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork)
+
+a simple fake clock for golang
+
+# Usage
+
+Replace uses of the `time` package with the `clockwork.Clock` interface instead.
+
+For example, instead of using `time.Sleep` directly:
+
+```
+func my_func() {
+	time.Sleep(3 * time.Second)
+	do_something()
+}
+```
+
+inject a clock and use its `Sleep` method instead:
+
+```
+func my_func(clock clockwork.Clock) {
+	clock.Sleep(3 * time.Second)
+	do_something()
+}
+```
+
+Now you can easily test `my_func` with a `FakeClock`:
+
+```
+func TestMyFunc(t *testing.T) {
+	c := clockwork.NewFakeClock()
+
+	// Start our sleepy function
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		my_func(c)
+		wg.Done()
+	}()
+
+	// Ensure we wait until my_func is sleeping
+	c.BlockUntil(1)
+
+	assert_state()
+
+	// Advance the FakeClock forward in time
+	c.Advance(3 * time.Second)
+
+	// Wait until the function completes
+	wg.Wait()
+
+	assert_state()
+}
+```
+
+and in production builds, simply inject the real clock instead:
+```
+my_func(clockwork.NewRealClock())
+```
+
+See [example_test.go](example_test.go) for a full example.
+
+# Credits
+
+clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking%20time)
diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go
new file mode 100644
index 0000000000000000000000000000000000000000..999fddd51a4c395fa41637740f32275dd2fa435c
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/clockwork.go
@@ -0,0 +1,179 @@
+package clockwork
+
+import (
+	"sync"
+	"time"
+)
+
+// Clock provides an interface that packages can use instead of directly
+// using the time package, so that chronology-related behavior can be tested.
+type Clock interface {
+	After(d time.Duration) <-chan time.Time
+	Sleep(d time.Duration)
+	Now() time.Time
+	Since(t time.Time) time.Duration
+}
+
+// FakeClock provides an interface for a clock which can be
+// manually advanced through time
+type FakeClock interface {
+	Clock
+	// Advance advances the FakeClock to a new point in time, ensuring any existing
+	// sleepers are notified appropriately before returning
+	Advance(d time.Duration)
+	// BlockUntil will block until the FakeClock has the given number of
+	// sleepers (callers of Sleep or After)
+	BlockUntil(n int)
+}
+
+// NewRealClock returns a Clock which simply delegates calls to the actual time
+// package; it should be used by packages in production.
+func NewRealClock() Clock {
+	return &realClock{}
+}
+
+// NewFakeClock returns a FakeClock implementation which can be
+// manually advanced through time for testing. The initial time of the
+// FakeClock will be an arbitrary non-zero time.
+func NewFakeClock() FakeClock {
+	// use a fixture that does not fulfill Time.IsZero()
+	return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC))
+}
+
+// NewFakeClockAt returns a FakeClock initialised at the given time.Time.
+func NewFakeClockAt(t time.Time) FakeClock {
+	return &fakeClock{
+		time: t,
+	}
+}
+
+type realClock struct{}
+
+func (rc *realClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
+
+func (rc *realClock) Sleep(d time.Duration) {
+	time.Sleep(d)
+}
+
+func (rc *realClock) Now() time.Time {
+	return time.Now()
+}
+
+func (rc *realClock) Since(t time.Time) time.Duration {
+	return rc.Now().Sub(t)
+}
+
+type fakeClock struct {
+	sleepers []*sleeper
+	blockers []*blocker
+	time     time.Time
+
+	l sync.RWMutex
+}
+
+// sleeper represents a caller of After or Sleep
+type sleeper struct {
+	until time.Time
+	done  chan time.Time
+}
+
+// blocker represents a caller of BlockUntil
+type blocker struct {
+	count int
+	ch    chan struct{}
+}
+
+// After mimics time.After; it waits for the given duration to elapse on the
+// fakeClock, then sends the current time on the returned channel.
+func (fc *fakeClock) After(d time.Duration) <-chan time.Time {
+	fc.l.Lock()
+	defer fc.l.Unlock()
+	now := fc.time
+	done := make(chan time.Time, 1)
+	if d.Nanoseconds() == 0 {
+		// special case - trigger immediately
+		done <- now
+	} else {
+		// otherwise, add to the set of sleepers
+		s := &sleeper{
+			until: now.Add(d),
+			done:  done,
+		}
+		fc.sleepers = append(fc.sleepers, s)
+		// and notify any blockers
+		fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+	}
+	return done
+}
+
+// notifyBlockers notifies all the blockers waiting until the
+// given number of sleepers are waiting on the fakeClock. It
+// returns an updated slice of blockers (i.e. those still waiting)
+func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) {
+	for _, b := range blockers {
+		if b.count == count {
+			close(b.ch)
+		} else {
+			newBlockers = append(newBlockers, b)
+		}
+	}
+	return
+}
+
+// Sleep blocks until the given duration has passed on the fakeClock
+func (fc *fakeClock) Sleep(d time.Duration) {
+	<-fc.After(d)
+}
+
+// Now returns the current time of the fakeClock
+func (fc *fakeClock) Now() time.Time {
+	fc.l.RLock()
+	t := fc.time
+	fc.l.RUnlock()
+	return t
+}
+
+// Since returns the duration that has passed since the given time on the fakeClock
+func (fc *fakeClock) Since(t time.Time) time.Duration {
+	return fc.Now().Sub(t)
+}
+
+// Advance advances fakeClock to a new point in time, ensuring channels from any
+// previous invocations of After are notified appropriately before returning
+func (fc *fakeClock) Advance(d time.Duration) {
+	fc.l.Lock()
+	defer fc.l.Unlock()
+	end := fc.time.Add(d)
+	var newSleepers []*sleeper
+	for _, s := range fc.sleepers {
+		if end.Sub(s.until) >= 0 {
+			s.done <- end
+		} else {
+			newSleepers = append(newSleepers, s)
+		}
+	}
+	fc.sleepers = newSleepers
+	fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+	fc.time = end
+}
+
+// BlockUntil will block until the fakeClock has the given number of sleepers
+// (callers of Sleep or After)
+func (fc *fakeClock) BlockUntil(n int) {
+	fc.l.Lock()
+	// Fast path: current number of sleepers is what we're looking for
+	if len(fc.sleepers) == n {
+		fc.l.Unlock()
+		return
+	}
+	// Otherwise, set up a new blocker
+	b := &blocker{
+		count: n,
+		ch:    make(chan struct{}),
+	}
+	fc.blockers = append(fc.blockers, b)
+	fc.l.Unlock()
+	<-b.ch
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/LICENSE b/vendor/github.com/russellhaering/goxmldsig/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..67db8588217f266eb561f75fae738656325deac9
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/LICENSE
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/russellhaering/goxmldsig/README.md b/vendor/github.com/russellhaering/goxmldsig/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5fc3bdbceb2dfb1f57b3642502a79c6aa3f28cd5
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/README.md
@@ -0,0 +1,90 @@
+# goxmldsig
+
+[![Build Status](https://travis-ci.org/russellhaering/goxmldsig.svg?branch=master)](https://travis-ci.org/russellhaering/goxmldsig)
+[![GoDoc](https://godoc.org/github.com/russellhaering/goxmldsig?status.svg)](https://godoc.org/github.com/russellhaering/goxmldsig)
+
+XML Digital Signatures implemented in pure Go.
+
+## Installation
+
+Install `goxmldsig` into your `$GOPATH` using `go get`:
+
+```
+$ go get github.com/russellhaering/goxmldsig
+```
+
+## Usage
+
+### Signing
+
+```go
+package main
+
+import (
+    "github.com/beevik/etree"
+    "github.com/russellhaering/goxmldsig"
+)
+
+func main() {
+    // Generate a key and self-signed certificate for signing
+    randomKeyStore := dsig.RandomKeyStoreForTest()
+    ctx := dsig.NewDefaultSigningContext(randomKeyStore)
+    elementToSign := &etree.Element{
+        Tag: "ExampleElement",
+    }
+    elementToSign.CreateAttr("ID", "id1234")
+
+    // Sign the element
+    signedElement, err := ctx.SignEnveloped(elementToSign)
+    if err != nil {
+        panic(err)
+    }
+
+    // Serialize the signed element. It is important not to modify the element
+    // after it has been signed - even pretty-printing the XML will invalidate
+    // the signature.
+    doc := etree.NewDocument()
+    doc.SetRoot(signedElement)
+    str, err := doc.WriteToString()
+    if err != nil {
+        panic(err)
+    }
+
+    println(str)
+}
+```
+
+### Signature Validation
+
+```go
+// Validate an element against a root certificate
+func validate(root *x509.Certificate, el *etree.Element) {
+    // Construct a signing context with one or more roots of trust.
+    ctx := dsig.NewDefaultValidationContext(&dsig.MemoryX509CertificateStore{
+        Roots: []*x509.Certificate{root},
+    })
+
+    // It is important to only use the returned validated element.
+    // See: https://www.w3.org/TR/xmldsig-bestpractices/#check-what-is-signed
+    validated, err := ctx.Validate(el)
+    if err != nil {
+        panic(err)
+    }
+
+    doc := etree.NewDocument()
+    doc.SetRoot(validated)
+    str, err := doc.WriteToString()
+    if err != nil {
+        panic(err)
+    }
+
+    println(str)
+}
+```
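+
+### Signing with an existing key pair
+
+The example above uses a throwaway key from `RandomKeyStoreForTest`. As a minimal
+sketch (the `cert.pem`/`key.pem` paths are placeholders, and the private key must
+be RSA), a `tls.Certificate` loaded from disk can be adapted via `TLSCertKeyStore`:
+
+```go
+// Sign an element using a key pair loaded from disk.
+func signWithKeyPair(el *etree.Element) (*etree.Element, error) {
+    keyPair, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
+    if err != nil {
+        return nil, err
+    }
+
+    // TLSCertKeyStore adapts the tls.Certificate to the X509KeyStore interface;
+    // it expects an RSA private key and at least one certificate.
+    ctx := dsig.NewDefaultSigningContext(dsig.TLSCertKeyStore(keyPair))
+    return ctx.SignEnveloped(el)
+}
+```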
+
+## Limitations
+
+This library was created in order to [implement SAML 2.0](https://github.com/russellhaering/gosaml2)
+without needing to execute a command line tool to create and validate signatures. It currently
+only implements the subset of relevant standards needed to support that implementation, but
+I hope to make it more complete over time. Contributions are welcome.
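+
+## Controlling time during validation
+
+Certificate validity checks in `ValidationContext` use its `Clock` field, which
+falls back to the real system clock when left nil. For deterministic tests the
+clock can be pinned to a fixed time; this is an illustrative sketch (the function
+name and the way the timestamp is chosen are arbitrary):
+
+```go
+// Validate el against root as if the current time were `at`.
+func validateAt(root *x509.Certificate, el *etree.Element, at time.Time) (*etree.Element, error) {
+    ctx := &dsig.ValidationContext{
+        CertificateStore: &dsig.MemoryX509CertificateStore{
+            Roots: []*x509.Certificate{root},
+        },
+        IdAttribute: dsig.DefaultIdAttr,
+        Clock:       dsig.NewFakeClockAt(at),
+    }
+
+    return ctx.Validate(el)
+}
+```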
diff --git a/vendor/github.com/russellhaering/goxmldsig/canonicalize.go b/vendor/github.com/russellhaering/goxmldsig/canonicalize.go
new file mode 100644
index 0000000000000000000000000000000000000000..34e1a581ffd037ae95050b8fc27221e9d4d21670
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/canonicalize.go
@@ -0,0 +1,128 @@
+package dsig
+
+import (
+	"sort"
+
+	"github.com/beevik/etree"
+	"github.com/russellhaering/goxmldsig/etreeutils"
+)
+
+// Canonicalizer is an implementation of a canonicalization algorithm.
+type Canonicalizer interface {
+	Canonicalize(el *etree.Element) ([]byte, error)
+	Algorithm() AlgorithmID
+}
+
+type c14N10ExclusiveCanonicalizer struct {
+	prefixList string
+}
+
+// MakeC14N10ExclusiveCanonicalizerWithPrefixList constructs an exclusive Canonicalizer
+// from a PrefixList in NMTOKENS format (a white space separated list).
+func MakeC14N10ExclusiveCanonicalizerWithPrefixList(prefixList string) Canonicalizer {
+	return &c14N10ExclusiveCanonicalizer{
+		prefixList: prefixList,
+	}
+}
+
+// Canonicalize transforms the input Element into a serialized XML document in canonical form.
+func (c *c14N10ExclusiveCanonicalizer) Canonicalize(el *etree.Element) ([]byte, error) {
+	err := etreeutils.TransformExcC14n(el, c.prefixList)
+	if err != nil {
+		return nil, err
+	}
+
+	return canonicalSerialize(el)
+}
+
+func (c *c14N10ExclusiveCanonicalizer) Algorithm() AlgorithmID {
+	return CanonicalXML10ExclusiveAlgorithmId
+}
+
+type c14N11Canonicalizer struct{}
+
+// MakeC14N11Canonicalizer constructs an inclusive canonicalizer.
+func MakeC14N11Canonicalizer() Canonicalizer {
+	return &c14N11Canonicalizer{}
+}
+
+// Canonicalize transforms the input Element into a serialized XML document in canonical form.
+func (c *c14N11Canonicalizer) Canonicalize(el *etree.Element) ([]byte, error) {
+	scope := make(map[string]struct{})
+	return canonicalSerialize(canonicalPrep(el, scope))
+}
+
+func (c *c14N11Canonicalizer) Algorithm() AlgorithmID {
+	return CanonicalXML11AlgorithmId
+}
+
+func composeAttr(space, key string) string {
+	if space != "" {
+		return space + ":" + key
+	}
+
+	return key
+}
+
+type c14nSpace struct {
+	a    etree.Attr
+	used bool
+}
+
+const nsSpace = "xmlns"
+
+// canonicalPrep accepts an *etree.Element and transforms it into one which is ready
+// for serialization into inclusive canonical form. Specifically this
+// entails:
+//
+// 1. Stripping re-declarations of namespaces
+// 2. Sorting attributes into canonical order
+//
+// Inclusive canonicalization does not strip unused namespaces.
+//
+// TODO(russell_h): This is very similar to excCanonicalPrep - perhaps they should
+// be unified into one parameterized function?
+func canonicalPrep(el *etree.Element, seenSoFar map[string]struct{}) *etree.Element {
+	_seenSoFar := make(map[string]struct{})
+	for k, v := range seenSoFar {
+		_seenSoFar[k] = v
+	}
+
+	ne := el.Copy()
+	sort.Sort(etreeutils.SortedAttrs(ne.Attr))
+	if len(ne.Attr) != 0 {
+		for _, attr := range ne.Attr {
+			if attr.Space != nsSpace {
+				continue
+			}
+			key := attr.Space + ":" + attr.Key
+			if _, seen := _seenSoFar[key]; seen {
+				ne.RemoveAttr(attr.Space + ":" + attr.Key)
+			} else {
+				_seenSoFar[key] = struct{}{}
+			}
+		}
+	}
+
+	for i, token := range ne.Child {
+		childElement, ok := token.(*etree.Element)
+		if ok {
+			ne.Child[i] = canonicalPrep(childElement, _seenSoFar)
+		}
+	}
+
+	return ne
+}
+
+func canonicalSerialize(el *etree.Element) ([]byte, error) {
+	doc := etree.NewDocument()
+	doc.SetRoot(el.Copy())
+
+	doc.WriteSettings = etree.WriteSettings{
+		CanonicalAttrVal: true,
+		CanonicalEndTags: true,
+		CanonicalText:    true,
+	}
+
+	return doc.WriteToBytes()
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/clock.go b/vendor/github.com/russellhaering/goxmldsig/clock.go
new file mode 100644
index 0000000000000000000000000000000000000000..cceaaa546002387b05106c88a7d2854766dd1c9c
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/clock.go
@@ -0,0 +1,55 @@
+package dsig
+
+import (
+	"time"
+
+	"github.com/jonboulle/clockwork"
+)
+
+// Clock wraps a clockwork.Clock (which could be real or fake) in order
+// to default to a real clock when a nil *Clock is used. In other words,
+// if you attempt to use a nil *Clock it will defer to the real system
+// clock. This allows Clock to be easily added to structs with methods
+// that currently reference the time package, without requiring every
+// instantiation of that struct to be updated.
+type Clock struct {
+	wrapped clockwork.Clock
+}
+
+func (c *Clock) getWrapped() clockwork.Clock {
+	if c == nil {
+		return clockwork.NewRealClock()
+	}
+
+	return c.wrapped
+}
+
+func (c *Clock) After(d time.Duration) <-chan time.Time {
+	return c.getWrapped().After(d)
+}
+
+func (c *Clock) Sleep(d time.Duration) {
+	c.getWrapped().Sleep(d)
+}
+
+func (c *Clock) Now() time.Time {
+	return c.getWrapped().Now()
+}
+
+func NewRealClock() *Clock {
+	return &Clock{
+		wrapped: clockwork.NewRealClock(),
+	}
+}
+
+func NewFakeClock(wrapped clockwork.Clock) *Clock {
+	return &Clock{
+		wrapped: wrapped,
+	}
+}
+
+func NewFakeClockAt(t time.Time) *Clock {
+	return &Clock{
+		wrapped: clockwork.NewFakeClockAt(t),
+	}
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/etreeutils/canonicalize.go b/vendor/github.com/russellhaering/goxmldsig/etreeutils/canonicalize.go
new file mode 100644
index 0000000000000000000000000000000000000000..9e6df954d230fef099d9fbc7deb5eff5244dacc0
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/etreeutils/canonicalize.go
@@ -0,0 +1,98 @@
+package etreeutils
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/beevik/etree"
+)
+
+// TransformExcC14n transforms the passed element into xml-exc-c14n form.
+func TransformExcC14n(el *etree.Element, inclusiveNamespacesPrefixList string) error {
+	prefixes := strings.Fields(inclusiveNamespacesPrefixList)
+	prefixSet := make(map[string]struct{}, len(prefixes))
+
+	for _, prefix := range prefixes {
+		prefixSet[prefix] = struct{}{}
+	}
+
+	err := transformExcC14n(DefaultNSContext, EmptyNSContext, el, prefixSet)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func transformExcC14n(ctx, declared NSContext, el *etree.Element, inclusiveNamespaces map[string]struct{}) error {
+	scope, err := ctx.SubContext(el)
+	if err != nil {
+		return err
+	}
+
+	visiblyUtilizedPrefixes := map[string]struct{}{
+		el.Space: struct{}{},
+	}
+
+	filteredAttrs := []etree.Attr{}
+
+	// Filter out all namespace declarations
+	for _, attr := range el.Attr {
+		switch {
+		case attr.Space == xmlnsPrefix:
+			if _, ok := inclusiveNamespaces[attr.Key]; ok {
+				visiblyUtilizedPrefixes[attr.Key] = struct{}{}
+			}
+
+		case attr.Space == defaultPrefix && attr.Key == xmlnsPrefix:
+			if _, ok := inclusiveNamespaces[defaultPrefix]; ok {
+				visiblyUtilizedPrefixes[defaultPrefix] = struct{}{}
+			}
+
+		default:
+			if attr.Space != defaultPrefix {
+				visiblyUtilizedPrefixes[attr.Space] = struct{}{}
+			}
+
+			filteredAttrs = append(filteredAttrs, attr)
+		}
+	}
+
+	el.Attr = filteredAttrs
+
+	declared = declared.Copy()
+
+	// Declare all visibly utilized prefixes that are in-scope but haven't
+	// been declared in the canonicalized form yet. These might have been
+	// declared on this element but then filtered out above, or they might
+	// have been declared on an ancestor (before canonicalization) that
+	// didn't visibly utilize them and therefore had them removed.
+	for prefix := range visiblyUtilizedPrefixes {
+		// Skip redundant declarations, i.e. prefixes that are already
+		// declared with the same value.
+		if declaredNamespace, ok := declared.prefixes[prefix]; ok {
+			if value, ok := scope.prefixes[prefix]; ok && declaredNamespace == value {
+				continue
+			}
+		}
+
+		namespace, err := scope.LookupPrefix(prefix)
+		if err != nil {
+			return err
+		}
+
+		el.Attr = append(el.Attr, declared.declare(prefix, namespace))
+	}
+
+	sort.Sort(SortedAttrs(el.Attr))
+
+	// Transform child elements
+	for _, child := range el.ChildElements() {
+		err := transformExcC14n(scope, declared, child, inclusiveNamespaces)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/etreeutils/namespace.go b/vendor/github.com/russellhaering/goxmldsig/etreeutils/namespace.go
new file mode 100644
index 0000000000000000000000000000000000000000..45f3bea1647c59e0290c57a0ad66377fce898889
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/etreeutils/namespace.go
@@ -0,0 +1,328 @@
+package etreeutils
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+
+	"github.com/beevik/etree"
+)
+
+const (
+	defaultPrefix = ""
+	xmlnsPrefix   = "xmlns"
+	xmlPrefix     = "xml"
+
+	XMLNamespace   = "http://www.w3.org/XML/1998/namespace"
+	XMLNSNamespace = "http://www.w3.org/2000/xmlns/"
+)
+
+var (
+	DefaultNSContext = NSContext{
+		prefixes: map[string]string{
+			defaultPrefix: XMLNamespace,
+			xmlPrefix:     XMLNamespace,
+			xmlnsPrefix:   XMLNSNamespace,
+		},
+	}
+
+	EmptyNSContext = NSContext{}
+
+	ErrReservedNamespace       = errors.New("disallowed declaration of reserved namespace")
+	ErrInvalidDefaultNamespace = errors.New("invalid default namespace declaration")
+	ErrTraversalHalted         = errors.New("traversal halted")
+)
+
+type ErrUndeclaredNSPrefix struct {
+	Prefix string
+}
+
+func (e ErrUndeclaredNSPrefix) Error() string {
+	return fmt.Sprintf("undeclared namespace prefix: '%s'", e.Prefix)
+}
+
+type NSContext struct {
+	prefixes map[string]string
+}
+
+func (ctx NSContext) Copy() NSContext {
+	prefixes := make(map[string]string, len(ctx.prefixes)+4)
+	for k, v := range ctx.prefixes {
+		prefixes[k] = v
+	}
+
+	return NSContext{prefixes: prefixes}
+}
+
+func (ctx NSContext) declare(prefix, namespace string) etree.Attr {
+	ctx.prefixes[prefix] = namespace
+
+	switch prefix {
+	case defaultPrefix:
+		return etree.Attr{
+			Key:   xmlnsPrefix,
+			Value: namespace,
+		}
+
+	default:
+		return etree.Attr{
+			Space: xmlnsPrefix,
+			Key:   prefix,
+			Value: namespace,
+		}
+	}
+}
+
+func (ctx NSContext) SubContext(el *etree.Element) (NSContext, error) {
+	// The subcontext should inherit existing declared prefixes
+	newCtx := ctx.Copy()
+
+	// Merge new namespace declarations on top of existing ones.
+	for _, attr := range el.Attr {
+		if attr.Space == xmlnsPrefix {
+			// This attribute is a namespace declaration of the form "xmlns:<prefix>"
+
+			// The 'xml' namespace may only be re-declared with the name 'http://www.w3.org/XML/1998/namespace'
+			if attr.Key == xmlPrefix && attr.Value != XMLNamespace {
+				return ctx, ErrReservedNamespace
+			}
+
+			// The 'xmlns' namespace may not be re-declared
+			if attr.Key == xmlnsPrefix {
+				return ctx, ErrReservedNamespace
+			}
+
+			newCtx.declare(attr.Key, attr.Value)
+		} else if attr.Space == defaultPrefix && attr.Key == xmlnsPrefix {
+			// This attribute is a default namespace declaration
+
+			// The xmlns namespace value may not be declared as the default namespace
+			if attr.Value == XMLNSNamespace {
+				return ctx, ErrInvalidDefaultNamespace
+			}
+
+			newCtx.declare(defaultPrefix, attr.Value)
+		}
+	}
+
+	return newCtx, nil
+}
+
+// Prefixes returns a copy of this context's prefix map.
+func (ctx NSContext) Prefixes() map[string]string {
+	prefixes := make(map[string]string, len(ctx.prefixes))
+	for k, v := range ctx.prefixes {
+		prefixes[k] = v
+	}
+
+	return prefixes
+}
+
+// LookupPrefix attempts to find a declared namespace for the specified prefix. If the prefix
+// is an empty string this will be the default namespace for this context. If the prefix is
+// undeclared in this context an ErrUndeclaredNSPrefix will be returned.
+func (ctx NSContext) LookupPrefix(prefix string) (string, error) {
+	if namespace, ok := ctx.prefixes[prefix]; ok {
+		return namespace, nil
+	}
+
+	return "", ErrUndeclaredNSPrefix{
+		Prefix: prefix,
+	}
+}
+
+// NSIterHandler is a function which is invoked with an element and its surrounding
+// NSContext during traversals.
+type NSIterHandler func(NSContext, *etree.Element) error
+
+// NSTraverse traverses an element tree, invoking the passed handler for each element
+// in the tree.
+func NSTraverse(ctx NSContext, el *etree.Element, handle NSIterHandler) error {
+	ctx, err := ctx.SubContext(el)
+	if err != nil {
+		return err
+	}
+
+	err = handle(ctx, el)
+	if err != nil {
+		return err
+	}
+
+	// Recursively traverse child elements.
+	for _, child := range el.ChildElements() {
+		err := NSTraverse(ctx, child, handle)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// NSDetatch makes a copy of the passed element, and declares any namespaces in
+// the passed context onto the new element before returning it.
+func NSDetatch(ctx NSContext, el *etree.Element) (*etree.Element, error) {
+	ctx, err := ctx.SubContext(el)
+	if err != nil {
+		return nil, err
+	}
+
+	el = el.Copy()
+
+	// Build a new attribute list
+	attrs := make([]etree.Attr, 0, len(el.Attr))
+
+	// First copy over anything that isn't a namespace declaration
+	for _, attr := range el.Attr {
+		if attr.Space == xmlnsPrefix {
+			continue
+		}
+
+		if attr.Space == defaultPrefix && attr.Key == xmlnsPrefix {
+			continue
+		}
+
+		attrs = append(attrs, attr)
+	}
+
+	// Append all in-context namespace declarations
+	for prefix, namespace := range ctx.prefixes {
+		// Skip the implicit "xml" and "xmlns" prefix declarations
+		if prefix == xmlnsPrefix || prefix == xmlPrefix {
+			continue
+		}
+
+		// Also skip declaring the default namespace as XMLNamespace
+		if prefix == defaultPrefix && namespace == XMLNamespace {
+			continue
+		}
+
+		if prefix != defaultPrefix {
+			attrs = append(attrs, etree.Attr{
+				Space: xmlnsPrefix,
+				Key:   prefix,
+				Value: namespace,
+			})
+		} else {
+			attrs = append(attrs, etree.Attr{
+				Key:   xmlnsPrefix,
+				Value: namespace,
+			})
+		}
+	}
+
+	sort.Sort(SortedAttrs(attrs))
+
+	el.Attr = attrs
+
+	return el, nil
+}
+
+// NSSelectOne behaves identically to NSSelectOneCtx, but uses DefaultNSContext as the
+// surrounding context.
+func NSSelectOne(el *etree.Element, namespace, tag string) (*etree.Element, error) {
+	return NSSelectOneCtx(DefaultNSContext, el, namespace, tag)
+}
+
+// NSSelectOneCtx conducts a depth-first search for an element with the specified namespace
+// and tag. If such an element is found, a new *etree.Element is returned which is a
+// copy of the found element, but with all in-context namespace declarations attached
+// to the element as attributes.
+func NSSelectOneCtx(ctx NSContext, el *etree.Element, namespace, tag string) (*etree.Element, error) {
+	var found *etree.Element
+
+	err := NSFindIterateCtx(ctx, el, namespace, tag, func(ctx NSContext, el *etree.Element) error {
+		var err error
+
+		found, err = NSDetatch(ctx, el)
+		if err != nil {
+			return err
+		}
+
+		return ErrTraversalHalted
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return found, nil
+}
+
+// NSFindIterate behaves identically to NSFindIterateCtx, but uses DefaultNSContext
+// as the surrounding context.
+func NSFindIterate(el *etree.Element, namespace, tag string, handle NSIterHandler) error {
+	return NSFindIterateCtx(DefaultNSContext, el, namespace, tag, handle)
+}
+
+// NSFindIterateCtx conducts a depth-first traversal searching for elements with the
+// specified tag in the specified namespace. It uses the passed NSContext for prefix
+// lookups. For each such element, the passed handler function is invoked. If the
+// handler function returns an error, traversal is immediately halted. If the error
+// returned by the handler is ErrTraversalHalted then nil will be returned by
+// NSFindIterateCtx. If any other error is returned by the handler, that error will
+// be returned by NSFindIterateCtx.
+func NSFindIterateCtx(ctx NSContext, el *etree.Element, namespace, tag string, handle NSIterHandler) error {
+	err := NSTraverse(ctx, el, func(ctx NSContext, el *etree.Element) error {
+		currentNS, err := ctx.LookupPrefix(el.Space)
+		if err != nil {
+			return err
+		}
+
+		// Base case, el is the sought after element.
+		if currentNS == namespace && el.Tag == tag {
+			return handle(ctx, el)
+		}
+
+		return nil
+	})
+
+	if err != nil && err != ErrTraversalHalted {
+		return err
+	}
+
+	return nil
+}
+
+// NSFindOne behaves identically to NSFindOneCtx, but uses DefaultNSContext for
+// context.
+func NSFindOne(el *etree.Element, namespace, tag string) (*etree.Element, error) {
+	return NSFindOneCtx(DefaultNSContext, el, namespace, tag)
+}
+
+// NSFindOneCtx conducts a depth-first search for the specified element. If such an element
+// is found a reference to it is returned.
+func NSFindOneCtx(ctx NSContext, el *etree.Element, namespace, tag string) (*etree.Element, error) {
+	var found *etree.Element
+
+	err := NSFindIterateCtx(ctx, el, namespace, tag, func(ctx NSContext, el *etree.Element) error {
+		found = el
+		return ErrTraversalHalted
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return found, nil
+}
+
+// NSBuildParentContext recurses upward from an element in order to build an NSContext
+// for its immediate parent. If the element has no parent, DefaultNSContext
+// is returned.
+func NSBuildParentContext(el *etree.Element) (NSContext, error) {
+	parent := el.Parent()
+	if parent == nil {
+		return DefaultNSContext, nil
+	}
+
+	ctx, err := NSBuildParentContext(parent)
+
+	if err != nil {
+		return ctx, err
+	}
+
+	return ctx.SubContext(parent)
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/etreeutils/sort.go b/vendor/github.com/russellhaering/goxmldsig/etreeutils/sort.go
new file mode 100644
index 0000000000000000000000000000000000000000..5871a3913de94d4a3b2437db980326c3250be97f
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/etreeutils/sort.go
@@ -0,0 +1,66 @@
+package etreeutils
+
+import "github.com/beevik/etree"
+
+// SortedAttrs provides sorting capabilities, compatible with XML C14N, on top
+// of an []etree.Attr
+type SortedAttrs []etree.Attr
+
+func (a SortedAttrs) Len() int {
+	return len(a)
+}
+
+func (a SortedAttrs) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a SortedAttrs) Less(i, j int) bool {
+	// This is the best reference I've found on sort order:
+	// http://dst.lbl.gov/~ksb/Scratch/XMLC14N.html
+
+	// If attr j is a default namespace declaration, attr i may
+	// not be strictly "less" than it.
+	if a[j].Space == defaultPrefix && a[j].Key == xmlnsPrefix {
+		return false
+	}
+
+	// Otherwise, if attr i is a default namespace declaration, it
+	// must be less than anything else.
+	if a[i].Space == defaultPrefix && a[i].Key == xmlnsPrefix {
+		return true
+	}
+
+	// Next, namespace prefix declarations, sorted by prefix, come before
+	// anything else.
+	if a[i].Space == xmlnsPrefix {
+		if a[j].Space == xmlnsPrefix {
+			return a[i].Key < a[j].Key
+		}
+		return true
+	}
+
+	if a[j].Space == xmlnsPrefix {
+		return false
+	}
+
+	// Then come unprefixed attributes, sorted by key.
+	if a[i].Space == defaultPrefix {
+		if a[j].Space == defaultPrefix {
+			return a[i].Key < a[j].Key
+		}
+		return true
+	}
+
+	if a[j].Space == defaultPrefix {
+		return false
+	}
+
+	// Wow. We're still going. Finally, attributes in the same namespace should be
+	// sorted by key. Attributes in different namespaces should be sorted by the
+	// actual namespace (_not_ the prefix). For now just use the prefix.
+	if a[i].Space == a[j].Space {
+		return a[i].Key < a[j].Key
+	}
+
+	return a[i].Space < a[j].Space
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/etreeutils/unmarshal.go b/vendor/github.com/russellhaering/goxmldsig/etreeutils/unmarshal.go
new file mode 100644
index 0000000000000000000000000000000000000000..b1fecf85a4cdff540e75e24eba8c817899e42af3
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/etreeutils/unmarshal.go
@@ -0,0 +1,43 @@
+package etreeutils
+
+import (
+	"encoding/xml"
+
+	"github.com/beevik/etree"
+)
+
+// NSUnmarshalElement unmarshals the passed etree Element into the value pointed to by
+// v using encoding/xml in the context of the passed NSContext. If v implements
+// ElementKeeper, SetUnderlyingElement will be called on v with a reference to el.
+func NSUnmarshalElement(ctx NSContext, el *etree.Element, v interface{}) error {
+	detatched, err := NSDetatch(ctx, el)
+	if err != nil {
+		return err
+	}
+
+	doc := etree.NewDocument()
+	doc.AddChild(detatched)
+	data, err := doc.WriteToBytes()
+	if err != nil {
+		return err
+	}
+
+	err = xml.Unmarshal(data, v)
+	if err != nil {
+		return err
+	}
+
+	switch v := v.(type) {
+	case ElementKeeper:
+		v.SetUnderlyingElement(el)
+	}
+
+	return nil
+}
+
+// ElementKeeper should be implemented by types that will be passed to
+// NSUnmarshalElement and wish to keep a reference to the underlying element.
+type ElementKeeper interface {
+	SetUnderlyingElement(*etree.Element)
+	UnderlyingElement() *etree.Element
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/keystore.go b/vendor/github.com/russellhaering/goxmldsig/keystore.go
new file mode 100644
index 0000000000000000000000000000000000000000..81487f080e3c3a9770da9027f93e9ff0799a79f1
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/keystore.go
@@ -0,0 +1,63 @@
+package dsig
+
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"math/big"
+	"time"
+)
+
+type X509KeyStore interface {
+	GetKeyPair() (privateKey *rsa.PrivateKey, cert []byte, err error)
+}
+
+type X509CertificateStore interface {
+	Certificates() (roots []*x509.Certificate, err error)
+}
+
+type MemoryX509CertificateStore struct {
+	Roots []*x509.Certificate
+}
+
+func (mX509cs *MemoryX509CertificateStore) Certificates() ([]*x509.Certificate, error) {
+	return mX509cs.Roots, nil
+}
+
+type MemoryX509KeyStore struct {
+	privateKey *rsa.PrivateKey
+	cert       []byte
+}
+
+func (ks *MemoryX509KeyStore) GetKeyPair() (*rsa.PrivateKey, []byte, error) {
+	return ks.privateKey, ks.cert, nil
+}
+
+func RandomKeyStoreForTest() X509KeyStore {
+	key, err := rsa.GenerateKey(rand.Reader, 1024)
+	if err != nil {
+		panic(err)
+	}
+
+	now := time.Now()
+
+	template := &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		NotBefore:    now.Add(-5 * time.Minute),
+		NotAfter:     now.Add(365 * 24 * time.Hour),
+
+		KeyUsage:              x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{},
+		BasicConstraintsValid: true,
+	}
+
+	cert, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
+	if err != nil {
+		panic(err)
+	}
+
+	return &MemoryX509KeyStore{
+		privateKey: key,
+		cert:       cert,
+	}
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/run_test.sh b/vendor/github.com/russellhaering/goxmldsig/run_test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..cfe5b2ea963b007cfc8a8c17ecde96e43c0c4eda
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/run_test.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+cd `dirname $0`
+DIRS=`git grep -l 'func Test' | xargs dirname | sort -u`
+for DIR in $DIRS
+do
+	echo
+	echo "dir: $DIR"
+	echo "======================================"
+	pushd $DIR >/dev/null
+	go test -v || exit 1
+	popd >/dev/null
+done
diff --git a/vendor/github.com/russellhaering/goxmldsig/sign.go b/vendor/github.com/russellhaering/goxmldsig/sign.go
new file mode 100644
index 0000000000000000000000000000000000000000..a62f837d5850395bc9a6e93a033525b3769b2f37
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/sign.go
@@ -0,0 +1,211 @@
+package dsig
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	_ "crypto/sha1"
+	_ "crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"fmt"
+
+	"github.com/beevik/etree"
+	"github.com/russellhaering/goxmldsig/etreeutils"
+)
+
+type SigningContext struct {
+	Hash          crypto.Hash
+	KeyStore      X509KeyStore
+	IdAttribute   string
+	Prefix        string
+	Canonicalizer Canonicalizer
+}
+
+func NewDefaultSigningContext(ks X509KeyStore) *SigningContext {
+	return &SigningContext{
+		Hash:          crypto.SHA256,
+		KeyStore:      ks,
+		IdAttribute:   DefaultIdAttr,
+		Prefix:        DefaultPrefix,
+		Canonicalizer: MakeC14N11Canonicalizer(),
+	}
+}
+
+func (ctx *SigningContext) SetSignatureMethod(algorithmID string) error {
+	hash, ok := signatureMethodsByIdentifier[algorithmID]
+	if !ok {
+		return fmt.Errorf("Unknown SignatureMethod: %s", algorithmID)
+	}
+
+	ctx.Hash = hash
+
+	return nil
+}
+
+func (ctx *SigningContext) digest(el *etree.Element) ([]byte, error) {
+	canonical, err := ctx.Canonicalizer.Canonicalize(el)
+	if err != nil {
+		return nil, err
+	}
+
+	hash := ctx.Hash.New()
+	_, err = hash.Write(canonical)
+	if err != nil {
+		return nil, err
+	}
+
+	return hash.Sum(nil), nil
+}
+
+func (ctx *SigningContext) constructSignedInfo(el *etree.Element, enveloped bool) (*etree.Element, error) {
+	digestAlgorithmIdentifier, ok := digestAlgorithmIdentifiers[ctx.Hash]
+	if !ok {
+		return nil, errors.New("unsupported hash mechanism")
+	}
+
+	signatureMethodIdentifier, ok := signatureMethodIdentifiers[ctx.Hash]
+	if !ok {
+		return nil, errors.New("unsupported signature method")
+	}
+
+	digest, err := ctx.digest(el)
+	if err != nil {
+		return nil, err
+	}
+
+	signedInfo := &etree.Element{
+		Tag:   SignedInfoTag,
+		Space: ctx.Prefix,
+	}
+
+	// /SignedInfo/CanonicalizationMethod
+	canonicalizationMethod := ctx.createNamespacedElement(signedInfo, CanonicalizationMethodTag)
+	canonicalizationMethod.CreateAttr(AlgorithmAttr, string(ctx.Canonicalizer.Algorithm()))
+
+	// /SignedInfo/SignatureMethod
+	signatureMethod := ctx.createNamespacedElement(signedInfo, SignatureMethodTag)
+	signatureMethod.CreateAttr(AlgorithmAttr, signatureMethodIdentifier)
+
+	// /SignedInfo/Reference
+	reference := ctx.createNamespacedElement(signedInfo, ReferenceTag)
+
+	dataId := el.SelectAttrValue(ctx.IdAttribute, "")
+	if dataId == "" {
+		return nil, errors.New("Missing data ID")
+	}
+
+	reference.CreateAttr(URIAttr, "#"+dataId)
+
+	// /SignedInfo/Reference/Transforms
+	transforms := ctx.createNamespacedElement(reference, TransformsTag)
+	if enveloped {
+		envelopedTransform := ctx.createNamespacedElement(transforms, TransformTag)
+		envelopedTransform.CreateAttr(AlgorithmAttr, EnvelopedSignatureAltorithmId.String())
+	}
+	canonicalizationAlgorithm := ctx.createNamespacedElement(transforms, TransformTag)
+	canonicalizationAlgorithm.CreateAttr(AlgorithmAttr, string(ctx.Canonicalizer.Algorithm()))
+
+	// /SignedInfo/Reference/DigestMethod
+	digestMethod := ctx.createNamespacedElement(reference, DigestMethodTag)
+	digestMethod.CreateAttr(AlgorithmAttr, digestAlgorithmIdentifier)
+
+	// /SignedInfo/Reference/DigestValue
+	digestValue := ctx.createNamespacedElement(reference, DigestValueTag)
+	digestValue.SetText(base64.StdEncoding.EncodeToString(digest))
+
+	return signedInfo, nil
+}
+
+func (ctx *SigningContext) ConstructSignature(el *etree.Element, enveloped bool) (*etree.Element, error) {
+	signedInfo, err := ctx.constructSignedInfo(el, enveloped)
+	if err != nil {
+		return nil, err
+	}
+
+	sig := &etree.Element{
+		Tag:   SignatureTag,
+		Space: ctx.Prefix,
+	}
+
+	xmlns := "xmlns"
+	if ctx.Prefix != "" {
+		xmlns += ":" + ctx.Prefix
+	}
+
+	sig.CreateAttr(xmlns, Namespace)
+	sig.AddChild(signedInfo)
+
+	// When using xml-c14n11 (i.e. non-exclusive canonicalization) the canonical form
+	// of the SignedInfo must declare all namespaces that are in scope at its final
+	// enveloped location in the document. In order to do that, we're going to construct
+	// a series of cascading NSContexts to capture namespace declarations:
+
+	// First get the context surrounding the element we are signing.
+	rootNSCtx, err := etreeutils.NSBuildParentContext(el)
+	if err != nil {
+		return nil, err
+	}
+
+	// Then capture any declarations on the element itself.
+	elNSCtx, err := rootNSCtx.SubContext(el)
+	if err != nil {
+		return nil, err
+	}
+
+	// Followed by declarations on the Signature (which we just added above)
+	sigNSCtx, err := elNSCtx.SubContext(sig)
+	if err != nil {
+		return nil, err
+	}
+
+	// Finally, detach the SignedInfo in order to capture all of the namespace
+	// declarations in the scope we've constructed.
+	detatchedSignedInfo, err := etreeutils.NSDetatch(sigNSCtx, signedInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	digest, err := ctx.digest(detatchedSignedInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	key, cert, err := ctx.KeyStore.GetKeyPair()
+	if err != nil {
+		return nil, err
+	}
+
+	rawSignature, err := rsa.SignPKCS1v15(rand.Reader, key, ctx.Hash, digest)
+	if err != nil {
+		return nil, err
+	}
+
+	signatureValue := ctx.createNamespacedElement(sig, SignatureValueTag)
+	signatureValue.SetText(base64.StdEncoding.EncodeToString(rawSignature))
+
+	keyInfo := ctx.createNamespacedElement(sig, KeyInfoTag)
+	x509Data := ctx.createNamespacedElement(keyInfo, X509DataTag)
+	x509Certificate := ctx.createNamespacedElement(x509Data, X509CertificateTag)
+	x509Certificate.SetText(base64.StdEncoding.EncodeToString(cert))
+
+	return sig, nil
+}
+
+func (ctx *SigningContext) createNamespacedElement(el *etree.Element, tag string) *etree.Element {
+	child := el.CreateElement(tag)
+	child.Space = ctx.Prefix
+	return child
+}
+
+func (ctx *SigningContext) SignEnveloped(el *etree.Element) (*etree.Element, error) {
+	sig, err := ctx.ConstructSignature(el, true)
+	if err != nil {
+		return nil, err
+	}
+
+	ret := el.Copy()
+	ret.Child = append(ret.Child, sig)
+
+	return ret, nil
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/tls_keystore.go b/vendor/github.com/russellhaering/goxmldsig/tls_keystore.go
new file mode 100644
index 0000000000000000000000000000000000000000..c98f312cae6afcee57d06d9e98c2959e37832d23
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/tls_keystore.go
@@ -0,0 +1,34 @@
+package dsig
+
+import (
+	"crypto/rsa"
+	"crypto/tls"
+	"fmt"
+)
+
+//Well-known errors
+var (
+	ErrNonRSAKey           = fmt.Errorf("Private key was not RSA")
+	ErrMissingCertificates = fmt.Errorf("No public certificates provided")
+)
+
+//TLSCertKeyStore wraps the stdlib tls.Certificate to return its contained key
+//and certs.
+type TLSCertKeyStore tls.Certificate
+
+//GetKeyPair implements X509KeyStore using the underlying tls.Certificate
+func (d TLSCertKeyStore) GetKeyPair() (*rsa.PrivateKey, []byte, error) {
+	pk, ok := d.PrivateKey.(*rsa.PrivateKey)
+
+	if !ok {
+		return nil, nil, ErrNonRSAKey
+	}
+
+	if len(d.Certificate) < 1 {
+		return nil, nil, ErrMissingCertificates
+	}
+
+	crt := d.Certificate[0]
+
+	return pk, crt, nil
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/types/signature.go b/vendor/github.com/russellhaering/goxmldsig/types/signature.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c7b1632a8ebd0140af178f3b9617ccd507ae0e9
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/types/signature.go
@@ -0,0 +1,93 @@
+package types
+
+import (
+	"encoding/xml"
+
+	"github.com/beevik/etree"
+)
+
+type InclusiveNamespaces struct {
+	XMLName    xml.Name `xml:"http://www.w3.org/2001/10/xml-exc-c14n# InclusiveNamespaces"`
+	PrefixList string   `xml:"PrefixList,attr"`
+}
+
+type Transform struct {
+	XMLName             xml.Name             `xml:"http://www.w3.org/2000/09/xmldsig# Transform"`
+	Algorithm           string               `xml:"Algorithm,attr"`
+	InclusiveNamespaces *InclusiveNamespaces `xml:"InclusiveNamespaces"`
+}
+
+type Transforms struct {
+	XMLName    xml.Name    `xml:"http://www.w3.org/2000/09/xmldsig# Transforms"`
+	Transforms []Transform `xml:"Transform"`
+}
+
+type DigestMethod struct {
+	XMLName   xml.Name `xml:"http://www.w3.org/2000/09/xmldsig# DigestMethod"`
+	Algorithm string   `xml:"Algorithm,attr"`
+}
+
+type Reference struct {
+	XMLName     xml.Name     `xml:"http://www.w3.org/2000/09/xmldsig# Reference"`
+	URI         string       `xml:"URI,attr"`
+	DigestValue string       `xml:"DigestValue"`
+	DigestAlgo  DigestMethod `xml:"DigestMethod"`
+	Transforms  Transforms   `xml:"Transforms"`
+}
+
+type CanonicalizationMethod struct {
+	XMLName   xml.Name `xml:"http://www.w3.org/2000/09/xmldsig# CanonicalizationMethod"`
+	Algorithm string   `xml:"Algorithm,attr"`
+}
+
+type SignatureMethod struct {
+	XMLName   xml.Name `xml:"http://www.w3.org/2000/09/xmldsig# SignatureMethod"`
+	Algorithm string   `xml:"Algorithm,attr"`
+}
+
+type SignedInfo struct {
+	XMLName                xml.Name               `xml:"http://www.w3.org/2000/09/xmldsig# SignedInfo"`
+	CanonicalizationMethod CanonicalizationMethod `xml:"CanonicalizationMethod"`
+	SignatureMethod        SignatureMethod        `xml:"SignatureMethod"`
+	References             []Reference            `xml:"Reference"`
+}
+
+type SignatureValue struct {
+	XMLName xml.Name `xml:"http://www.w3.org/2000/09/xmldsig# SignatureValue"`
+	Data    string   `xml:",chardata"`
+}
+
+type KeyInfo struct {
+	XMLName  xml.Name `xml:"http://www.w3.org/2000/09/xmldsig# KeyInfo"`
+	X509Data X509Data `xml:"X509Data"`
+}
+
+type X509Data struct {
+	XMLName         xml.Name        `xml:"http://www.w3.org/2000/09/xmldsig# X509Data"`
+	X509Certificate X509Certificate `xml:"X509Certificate"`
+}
+
+type X509Certificate struct {
+	XMLName xml.Name `xml:"http://www.w3.org/2000/09/xmldsig# X509Certificate"`
+	Data    string   `xml:",chardata"`
+}
+
+type Signature struct {
+	XMLName        xml.Name        `xml:"http://www.w3.org/2000/09/xmldsig# Signature"`
+	SignedInfo     *SignedInfo     `xml:"SignedInfo"`
+	SignatureValue *SignatureValue `xml:"SignatureValue"`
+	KeyInfo        *KeyInfo        `xml:"KeyInfo"`
+	el             *etree.Element
+}
+
+// SetUnderlyingElement will be called with a reference to the Element this Signature
+// was unmarshaled from.
+func (s *Signature) SetUnderlyingElement(el *etree.Element) {
+	s.el = el
+}
+
+// UnderlyingElement returns a reference to the Element this signature was unmarshaled
+// from, where applicable.
+func (s *Signature) UnderlyingElement() *etree.Element {
+	return s.el
+}
diff --git a/vendor/github.com/russellhaering/goxmldsig/validate.go b/vendor/github.com/russellhaering/goxmldsig/validate.go
new file mode 100644
index 0000000000000000000000000000000000000000..0489963c9b68e025fcb6b31d2954129c4089489c
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/validate.go
@@ -0,0 +1,419 @@
+package dsig
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"regexp"
+
+	"github.com/beevik/etree"
+	"github.com/russellhaering/goxmldsig/etreeutils"
+	"github.com/russellhaering/goxmldsig/types"
+)
+
+var uriRegexp = regexp.MustCompile("^#[a-zA-Z_][\\w.-]*$")
+var whiteSpace = regexp.MustCompile("\\s+")
+
+var (
+	// ErrMissingSignature indicates that no enveloped signature was found referencing
+	// the top level element passed for signature verification.
+	ErrMissingSignature = errors.New("Missing signature referencing the top-level element")
+)
+
+type ValidationContext struct {
+	CertificateStore X509CertificateStore
+	IdAttribute      string
+	Clock            *Clock
+}
+
+func NewDefaultValidationContext(certificateStore X509CertificateStore) *ValidationContext {
+	return &ValidationContext{
+		CertificateStore: certificateStore,
+		IdAttribute:      DefaultIdAttr,
+	}
+}
+
+// TODO(russell_h): More flexible namespace support. This might barely work.
+func inNamespace(el *etree.Element, ns string) bool {
+	for _, attr := range el.Attr {
+		if attr.Value == ns {
+			if attr.Space == "" && attr.Key == "xmlns" {
+				return el.Space == ""
+			} else if attr.Space == "xmlns" {
+				return el.Space == attr.Key
+			}
+		}
+	}
+
+	return false
+}
+
+func childPath(space, tag string) string {
+	if space == "" {
+		return "./" + tag
+	} else {
+		return "./" + space + ":" + tag
+	}
+}
+
+// The RemoveElement method on etree.Element isn't recursive...
+func recursivelyRemoveElement(tree, el *etree.Element) bool {
+	if tree.RemoveChild(el) != nil {
+		return true
+	}
+
+	for _, child := range tree.Child {
+		if childElement, ok := child.(*etree.Element); ok {
+			if recursivelyRemoveElement(childElement, el) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// transform applies the passed set of transforms to the specified root element.
+//
+// The functionality of transform is currently very limited and purpose-specific.
+//
+// NOTE(russell_h): Ideally this wouldn't mutate the root passed to it, and would
+// instead return a copy. Unfortunately copying the tree makes it difficult to
+// correctly locate the signature. I'm opting, for now, to simply mutate the root
+// parameter.
+func (ctx *ValidationContext) transform(
+	el *etree.Element,
+	sig *types.Signature,
+	ref *types.Reference) (*etree.Element, Canonicalizer, error) {
+	transforms := ref.Transforms.Transforms
+
+	if len(transforms) != 2 {
+		return nil, nil, errors.New("Expected Enveloped and C14N transforms")
+	}
+
+	var canonicalizer Canonicalizer
+
+	for _, transform := range transforms {
+		algo := transform.Algorithm
+
+		switch AlgorithmID(algo) {
+		case EnvelopedSignatureAltorithmId:
+			if !recursivelyRemoveElement(el, sig.UnderlyingElement()) {
+				return nil, nil, errors.New("Error applying canonicalization transform: Signature not found")
+			}
+
+		case CanonicalXML10ExclusiveAlgorithmId:
+			var prefixList string
+			if transform.InclusiveNamespaces != nil {
+				prefixList = transform.InclusiveNamespaces.PrefixList
+			}
+
+			canonicalizer = MakeC14N10ExclusiveCanonicalizerWithPrefixList(prefixList)
+
+		case CanonicalXML11AlgorithmId:
+			canonicalizer = MakeC14N11Canonicalizer()
+
+		default:
+			return nil, nil, errors.New("Unknown Transform Algorithm: " + algo)
+		}
+	}
+
+	if canonicalizer == nil {
+		return nil, nil, errors.New("Expected canonicalization transform")
+	}
+
+	return el, canonicalizer, nil
+}
+
+func (ctx *ValidationContext) digest(el *etree.Element, digestAlgorithmId string, canonicalizer Canonicalizer) ([]byte, error) {
+	data, err := canonicalizer.Canonicalize(el)
+	if err != nil {
+		return nil, err
+	}
+
+	digestAlgorithm, ok := digestAlgorithmsByIdentifier[digestAlgorithmId]
+	if !ok {
+		return nil, errors.New("Unknown digest algorithm: " + digestAlgorithmId)
+	}
+
+	hash := digestAlgorithm.New()
+	_, err = hash.Write(data)
+	if err != nil {
+		return nil, err
+	}
+
+	return hash.Sum(nil), nil
+}
+
+func (ctx *ValidationContext) verifySignedInfo(sig *types.Signature, canonicalizer Canonicalizer, signatureMethodId string, cert *x509.Certificate, decodedSignature []byte) error {
+	signatureElement := sig.UnderlyingElement()
+
+	signedInfo := signatureElement.FindElement(childPath(signatureElement.Space, SignedInfoTag))
+	if signedInfo == nil {
+		return errors.New("Missing SignedInfo")
+	}
+
+	// Canonicalize the xml
+	canonical, err := canonicalSerialize(signedInfo)
+	if err != nil {
+		return err
+	}
+
+	signatureAlgorithm, ok := signatureMethodsByIdentifier[signatureMethodId]
+	if !ok {
+		return errors.New("Unknown signature method: " + signatureMethodId)
+	}
+
+	hash := signatureAlgorithm.New()
+	_, err = hash.Write(canonical)
+	if err != nil {
+		return err
+	}
+
+	hashed := hash.Sum(nil)
+
+	pubKey, ok := cert.PublicKey.(*rsa.PublicKey)
+	if !ok {
+		return errors.New("Invalid public key")
+	}
+
+	// Verify that the private key matching the public key from the cert was what was used to sign the 'SignedInfo' and produce the 'SignatureValue'
+	err = rsa.VerifyPKCS1v15(pubKey, signatureAlgorithm, hashed[:], decodedSignature)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (ctx *ValidationContext) validateSignature(el *etree.Element, sig *types.Signature, cert *x509.Certificate) (*etree.Element, error) {
+	idAttr := el.SelectAttr(ctx.IdAttribute)
+	if idAttr == nil || idAttr.Value == "" {
+		return nil, errors.New("Missing ID attribute")
+	}
+
+	var ref *types.Reference
+
+	// Find the first reference which references the top-level element. Point
+	// into the slice rather than at the loop variable, which is reused.
+	for i, _ref := range sig.SignedInfo.References {
+		if _ref.URI == "" || _ref.URI[1:] == idAttr.Value {
+			ref = &sig.SignedInfo.References[i]
+			break
+		}
+	}
+
+	if ref == nil {
+		return nil, errors.New("Missing Reference referencing the top-level element")
+	}
+
+	// Perform all transformations listed in the 'SignedInfo'. In practice this
+	// means removing the enveloped 'Signature' and selecting a canonicalizer.
+	transformed, canonicalizer, err := ctx.transform(el, sig, ref)
+	if err != nil {
+		return nil, err
+	}
+
+	digestAlgorithm := ref.DigestAlgo.Algorithm
+
+	// Digest the transformed XML and compare it to the 'DigestValue' from the 'SignedInfo'
+	digest, err := ctx.digest(transformed, digestAlgorithm, canonicalizer)
+	if err != nil {
+		return nil, err
+	}
+
+	decodedDigestValue, err := base64.StdEncoding.DecodeString(ref.DigestValue)
+	if err != nil {
+		return nil, err
+	}
+
+	if !bytes.Equal(digest, decodedDigestValue) {
+		return nil, errors.New("Signature could not be verified")
+	}
+
+	// Decode the 'SignatureValue' so we can compare against it
+	decodedSignature, err := base64.StdEncoding.DecodeString(sig.SignatureValue.Data)
+	if err != nil {
+		return nil, errors.New("Could not decode signature")
+	}
+
+	// Actually verify the 'SignedInfo' was signed by a trusted source
+	signatureMethod := sig.SignedInfo.SignatureMethod.Algorithm
+	err = ctx.verifySignedInfo(sig, canonicalizer, signatureMethod, cert, decodedSignature)
+	if err != nil {
+		return nil, err
+	}
+
+	return transformed, nil
+}
+
+func contains(roots []*x509.Certificate, cert *x509.Certificate) bool {
+	for _, root := range roots {
+		if root.Equal(cert) {
+			return true
+		}
+	}
+	return false
+}
+
+// findSignature searches for a Signature element referencing the passed root element.
+func (ctx *ValidationContext) findSignature(el *etree.Element) (*types.Signature, error) {
+	idAttr := el.SelectAttr(ctx.IdAttribute)
+	if idAttr == nil || idAttr.Value == "" {
+		return nil, errors.New("Missing ID attribute")
+	}
+
+	var sig *types.Signature
+
+	// Traverse the tree looking for a Signature element
+	err := etreeutils.NSFindIterate(el, Namespace, SignatureTag, func(ctx etreeutils.NSContext, el *etree.Element) error {
+
+		found := false
+		err := etreeutils.NSFindIterateCtx(ctx, el, Namespace, SignedInfoTag,
+			func(ctx etreeutils.NSContext, signedInfo *etree.Element) error {
+				// Ignore any SignedInfo that isn't an immediate descendant of the Signature.
+				if signedInfo.Parent() != el {
+					return nil
+				}
+
+				detachedSignedInfo, err := etreeutils.NSDetatch(ctx, signedInfo)
+				if err != nil {
+					return err
+				}
+
+				c14NMethod := detachedSignedInfo.FindElement(childPath(detachedSignedInfo.Space, CanonicalizationMethodTag))
+				if c14NMethod == nil {
+					return errors.New("missing CanonicalizationMethod on Signature")
+				}
+
+				c14NAlgorithm := c14NMethod.SelectAttrValue(AlgorithmAttr, "")
+
+				var canonicalSignedInfo *etree.Element
+
+				switch AlgorithmID(c14NAlgorithm) {
+				case CanonicalXML10ExclusiveAlgorithmId:
+					err := etreeutils.TransformExcC14n(detachedSignedInfo, "")
+					if err != nil {
+						return err
+					}
+
+					// NOTE: TransformExcC14n transforms the element in-place,
+					// while canonicalPrep isn't meant to. Once we standardize
+					// this behavior we can drop this, as well as the adding and
+					// removing of elements below.
+					canonicalSignedInfo = detachedSignedInfo
+
+				case CanonicalXML11AlgorithmId:
+					canonicalSignedInfo = canonicalPrep(detachedSignedInfo, map[string]struct{}{})
+
+				default:
+					return fmt.Errorf("invalid CanonicalizationMethod on Signature: %s", c14NAlgorithm)
+				}
+
+				el.RemoveChild(signedInfo)
+				el.AddChild(canonicalSignedInfo)
+
+				found = true
+
+				return etreeutils.ErrTraversalHalted
+			})
+		if err != nil {
+			return err
+		}
+
+		if !found {
+			return errors.New("Missing SignedInfo")
+		}
+
+		// Unmarshal the signature into a structured Signature type
+		_sig := &types.Signature{}
+		err = etreeutils.NSUnmarshalElement(ctx, el, _sig)
+		if err != nil {
+			return err
+		}
+
+		// Traverse references in the signature to determine whether it has at least
+		// one reference to the top level element. If so, conclude the search.
+		for _, ref := range _sig.SignedInfo.References {
+			if ref.URI == "" || ref.URI[1:] == idAttr.Value {
+				sig = _sig
+				return etreeutils.ErrTraversalHalted
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if sig == nil {
+		return nil, ErrMissingSignature
+	}
+
+	return sig, nil
+}
+
+func (ctx *ValidationContext) verifyCertificate(sig *types.Signature) (*x509.Certificate, error) {
+	now := ctx.Clock.Now()
+
+	roots, err := ctx.CertificateStore.Certificates()
+	if err != nil {
+		return nil, err
+	}
+
+	var cert *x509.Certificate
+
+	if sig.KeyInfo != nil {
+		// If the Signature includes KeyInfo, extract the certificate from there
+		if sig.KeyInfo.X509Data.X509Certificate.Data == "" {
+			return nil, errors.New("missing X509Certificate within KeyInfo")
+		}
+
+		certData, err := base64.StdEncoding.DecodeString(
+			whiteSpace.ReplaceAllString(sig.KeyInfo.X509Data.X509Certificate.Data, ""))
+		if err != nil {
+			return nil, errors.New("Failed to parse certificate")
+		}
+
+		cert, err = x509.ParseCertificate(certData)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// If the Signature doesn't have KeyInfo, use the root certificate if there is only one
+		if len(roots) == 1 {
+			cert = roots[0]
+		} else {
+			return nil, errors.New("Missing x509 Element")
+		}
+	}
+
+	// Verify that the certificate is one we trust
+	if !contains(roots, cert) {
+		return nil, errors.New("Could not verify certificate against trusted certs")
+	}
+
+	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
+		return nil, errors.New("Cert is not valid at this time")
+	}
+
+	return cert, nil
+}
+
+// Validate verifies that the passed element contains a valid enveloped signature
+// matching a currently-valid certificate in the context's CertificateStore.
+func (ctx *ValidationContext) Validate(el *etree.Element) (*etree.Element, error) {
+	// Make a copy of the element to avoid mutating the one we were passed.
+	el = el.Copy()
+
+	sig, err := ctx.findSignature(el)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, err := ctx.verifyCertificate(sig)
+	if err != nil {
+		return nil, err
+	}
+
+	return ctx.validateSignature(el, sig, cert)
+}
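
A minimal caller-side sketch of this Validate API (the helper name validateSignedXML and the inputs signedXML/idpCertDER are illustrative, not part of this change): build a MemoryX509CertificateStore holding the trusted signing certificate, wrap it in a default ValidationContext, and validate the parsed document root.

package main

import (
	"crypto/x509"
	"fmt"

	"github.com/beevik/etree"
	dsig "github.com/russellhaering/goxmldsig"
)

// validateSignedXML is a hypothetical helper: it trusts a single DER-encoded
// certificate and checks that the document root carries a valid enveloped
// signature made with that certificate.
func validateSignedXML(signedXML, idpCertDER []byte) (*etree.Element, error) {
	cert, err := x509.ParseCertificate(idpCertDER)
	if err != nil {
		return nil, err
	}

	// The certificate store defines which signing certificates are trusted.
	certStore := dsig.MemoryX509CertificateStore{
		Roots: []*x509.Certificate{cert},
	}

	doc := etree.NewDocument()
	if err := doc.ReadFromBytes(signedXML); err != nil {
		return nil, err
	}

	ctx := dsig.NewDefaultValidationContext(&certStore)

	// Validate works on a copy of the element, so the caller's document is
	// left untouched; only the returned element should be treated as verified.
	validated, err := ctx.Validate(doc.Root())
	if err != nil {
		return nil, fmt.Errorf("signature validation failed: %v", err)
	}
	return validated, nil
}
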
diff --git a/vendor/github.com/russellhaering/goxmldsig/xml_constants.go b/vendor/github.com/russellhaering/goxmldsig/xml_constants.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c9cb6937de0730c3ca545c7359b002c0764d4a2
--- /dev/null
+++ b/vendor/github.com/russellhaering/goxmldsig/xml_constants.go
@@ -0,0 +1,78 @@
+package dsig
+
+import "crypto"
+
+const (
+	DefaultPrefix = "ds"
+	Namespace     = "http://www.w3.org/2000/09/xmldsig#"
+)
+
+// Tags
+const (
+	SignatureTag              = "Signature"
+	SignedInfoTag             = "SignedInfo"
+	CanonicalizationMethodTag = "CanonicalizationMethod"
+	SignatureMethodTag        = "SignatureMethod"
+	ReferenceTag              = "Reference"
+	TransformsTag             = "Transforms"
+	TransformTag              = "Transform"
+	DigestMethodTag           = "DigestMethod"
+	DigestValueTag            = "DigestValue"
+	SignatureValueTag         = "SignatureValue"
+	KeyInfoTag                = "KeyInfo"
+	X509DataTag               = "X509Data"
+	X509CertificateTag        = "X509Certificate"
+	InclusiveNamespacesTag    = "InclusiveNamespaces"
+)
+
+const (
+	AlgorithmAttr  = "Algorithm"
+	URIAttr        = "URI"
+	DefaultIdAttr  = "ID"
+	PrefixListAttr = "PrefixList"
+)
+
+type AlgorithmID string
+
+func (id AlgorithmID) String() string {
+	return string(id)
+}
+
+const (
+	RSASHA1SignatureMethod   = "http://www.w3.org/2000/09/xmldsig#rsa-sha1"
+	RSASHA256SignatureMethod = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"
+	RSASHA512SignatureMethod = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha512"
+)
+
+// Well-known canonicalization and transform algorithm identifiers
+const (
+	// Supported canonicalization algorithms
+	CanonicalXML10ExclusiveAlgorithmId AlgorithmID = "http://www.w3.org/2001/10/xml-exc-c14n#"
+	CanonicalXML11AlgorithmId          AlgorithmID = "http://www.w3.org/2006/12/xml-c14n11"
+
+	EnvelopedSignatureAltorithmId AlgorithmID = "http://www.w3.org/2000/09/xmldsig#enveloped-signature"
+)
+
+var digestAlgorithmIdentifiers = map[crypto.Hash]string{
+	crypto.SHA1:   "http://www.w3.org/2000/09/xmldsig#sha1",
+	crypto.SHA256: "http://www.w3.org/2001/04/xmlenc#sha256",
+	crypto.SHA512: "http://www.w3.org/2001/04/xmlenc#sha512",
+}
+
+var digestAlgorithmsByIdentifier = map[string]crypto.Hash{}
+var signatureMethodsByIdentifier = map[string]crypto.Hash{}
+
+func init() {
+	for hash, id := range digestAlgorithmIdentifiers {
+		digestAlgorithmsByIdentifier[id] = hash
+	}
+	for hash, id := range signatureMethodIdentifiers {
+		signatureMethodsByIdentifier[id] = hash
+	}
+}
+
+var signatureMethodIdentifiers = map[crypto.Hash]string{
+	crypto.SHA1:   RSASHA1SignatureMethod,
+	crypto.SHA256: RSASHA256SignatureMethod,
+	crypto.SHA512: RSASHA512SignatureMethod,
+}
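
The init function above inverts digestAlgorithmIdentifiers and signatureMethodIdentifiers so that Algorithm URIs read from a document can be mapped back to a crypto.Hash. Those reverse maps are unexported, so the standalone sketch below simply mirrors the same lookup using the exported URI constants (the local sigMethods map is illustrative, not the package's own):

package main

import (
	"crypto"
	"fmt"

	dsig "github.com/russellhaering/goxmldsig"
)

func main() {
	// Illustrative mirror of the package's unexported signatureMethodsByIdentifier
	// map: a SignatureMethod Algorithm URI resolves to the crypto.Hash used when
	// verifying the signature value.
	sigMethods := map[string]crypto.Hash{
		dsig.RSASHA1SignatureMethod:   crypto.SHA1,
		dsig.RSASHA256SignatureMethod: crypto.SHA256,
		dsig.RSASHA512SignatureMethod: crypto.SHA512,
	}

	uri := dsig.RSASHA256SignatureMethod
	hash, ok := sigMethods[uri]
	if !ok {
		fmt.Println("unsupported signature method:", uri)
		return
	}
	fmt.Printf("digest size for %s: %d bytes\n", uri, hash.Size())
}
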
diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
new file mode 100644
index 0000000000000000000000000000000000000000..6c6e84236ab76c84070c85889507d2d9fb2e2d5d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
@@ -0,0 +1,120 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ripemd160 implements the RIPEMD-160 hash algorithm.
+package ripemd160 // import "golang.org/x/crypto/ripemd160"
+
+// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart
+// Preneel with specifications available at:
+// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.
+
+import (
+	"crypto"
+	"hash"
+)
+
+func init() {
+	crypto.RegisterHash(crypto.RIPEMD160, New)
+}
+
+// The size of the checksum in bytes.
+const Size = 20
+
+// The block size of the hash algorithm in bytes.
+const BlockSize = 64
+
+const (
+	_s0 = 0x67452301
+	_s1 = 0xefcdab89
+	_s2 = 0x98badcfe
+	_s3 = 0x10325476
+	_s4 = 0xc3d2e1f0
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+	s  [5]uint32       // running context
+	x  [BlockSize]byte // temporary buffer
+	nx int             // index into x
+	tc uint64          // total count of bytes processed
+}
+
+func (d *digest) Reset() {
+	d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4
+	d.nx = 0
+	d.tc = 0
+}
+
+// New returns a new hash.Hash computing the checksum.
+func New() hash.Hash {
+	result := new(digest)
+	result.Reset()
+	return result
+}
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return BlockSize }
+
+func (d *digest) Write(p []byte) (nn int, err error) {
+	nn = len(p)
+	d.tc += uint64(nn)
+	if d.nx > 0 {
+		n := len(p)
+		if n > BlockSize-d.nx {
+			n = BlockSize - d.nx
+		}
+		for i := 0; i < n; i++ {
+			d.x[d.nx+i] = p[i]
+		}
+		d.nx += n
+		if d.nx == BlockSize {
+			_Block(d, d.x[0:])
+			d.nx = 0
+		}
+		p = p[n:]
+	}
+	n := _Block(d, p)
+	p = p[n:]
+	if len(p) > 0 {
+		d.nx = copy(d.x[:], p)
+	}
+	return
+}
+
+func (d0 *digest) Sum(in []byte) []byte {
+	// Make a copy of d0 so that caller can keep writing and summing.
+	d := *d0
+
+	// Padding.  Add a 1 bit and 0 bits until 56 bytes mod 64.
+	tc := d.tc
+	var tmp [64]byte
+	tmp[0] = 0x80
+	if tc%64 < 56 {
+		d.Write(tmp[0 : 56-tc%64])
+	} else {
+		d.Write(tmp[0 : 64+56-tc%64])
+	}
+
+	// Length in bits.
+	tc <<= 3
+	for i := uint(0); i < 8; i++ {
+		tmp[i] = byte(tc >> (8 * i))
+	}
+	d.Write(tmp[0:8])
+
+	if d.nx != 0 {
+		panic("d.nx != 0")
+	}
+
+	var digest [Size]byte
+	for i, s := range d.s {
+		digest[i*4] = byte(s)
+		digest[i*4+1] = byte(s >> 8)
+		digest[i*4+2] = byte(s >> 16)
+		digest[i*4+3] = byte(s >> 24)
+	}
+
+	return append(in, digest[:]...)
+}
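
As a quick sanity check of the vendored hash, ripemd160.New returns a standard hash.Hash with a 20-byte digest, so the usual Write/Sum pattern applies; the expected output in the comment is the widely published RIPEMD-160 test vector for this input:

package main

import (
	"fmt"

	"golang.org/x/crypto/ripemd160"
)

func main() {
	// ripemd160.New returns a hash.Hash producing a Size (20) byte digest.
	h := ripemd160.New()
	h.Write([]byte("The quick brown fox jumps over the lazy dog"))
	// Expected: 37f332f68db77bd9d7edd4969571ad671cf9dd3b
	fmt.Printf("%x\n", h.Sum(nil))
}
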
diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
new file mode 100644
index 0000000000000000000000000000000000000000..7bc8e6c485e53c6b55dfd2f16c2d93b84b2b4bee
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
@@ -0,0 +1,161 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// RIPEMD-160 block step.
+// In its own file so that a faster assembly or C version
+// can be substituted easily.
+
+package ripemd160
+
+// work buffer indices and roll amounts for one line
+var _n = [80]uint{
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+	7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
+	3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
+	1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
+	4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13,
+}
+
+var _r = [80]uint{
+	11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
+	7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
+	11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
+	11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
+	9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6,
+}
+
+// same for the other parallel one
+var n_ = [80]uint{
+	5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
+	6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
+	15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
+	8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
+	12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11,
+}
+
+var r_ = [80]uint{
+	8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
+	9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
+	9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
+	15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
+	8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11,
+}
+
+func _Block(md *digest, p []byte) int {
+	n := 0
+	var x [16]uint32
+	var alpha, beta uint32
+	for len(p) >= BlockSize {
+		a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4]
+		aa, bb, cc, dd, ee := a, b, c, d, e
+		j := 0
+		for i := 0; i < 16; i++ {
+			x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
+			j += 4
+		}
+
+		// round 1
+		i := 0
+		for i < 16 {
+			alpha = a + (b ^ c ^ d) + x[_n[i]]
+			s := _r[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + e
+			beta = c<<10 | c>>22
+			a, b, c, d, e = e, alpha, b, beta, d
+
+			// parallel line
+			alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
+			s = r_[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + ee
+			beta = cc<<10 | cc>>22
+			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+			i++
+		}
+
+		// round 2
+		for i < 32 {
+			alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
+			s := _r[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + e
+			beta = c<<10 | c>>22
+			a, b, c, d, e = e, alpha, b, beta, d
+
+			// parallel line
+			alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
+			s = r_[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + ee
+			beta = cc<<10 | cc>>22
+			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+			i++
+		}
+
+		// round 3
+		for i < 48 {
+			alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
+			s := _r[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + e
+			beta = c<<10 | c>>22
+			a, b, c, d, e = e, alpha, b, beta, d
+
+			// parallel line
+			alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
+			s = r_[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + ee
+			beta = cc<<10 | cc>>22
+			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+			i++
+		}
+
+		// round 4
+		for i < 64 {
+			alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
+			s := _r[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + e
+			beta = c<<10 | c>>22
+			a, b, c, d, e = e, alpha, b, beta, d
+
+			// parallel line
+			alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
+			s = r_[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + ee
+			beta = cc<<10 | cc>>22
+			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+			i++
+		}
+
+		// round 5
+		for i < 80 {
+			alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
+			s := _r[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + e
+			beta = c<<10 | c>>22
+			a, b, c, d, e = e, alpha, b, beta, d
+
+			// parallel line
+			alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
+			s = r_[i]
+			alpha = (alpha<<s | alpha>>(32-s)) + ee
+			beta = cc<<10 | cc>>22
+			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+			i++
+		}
+
+		// combine results
+		dd += c + md.s[1]
+		md.s[1] = md.s[2] + d + ee
+		md.s[2] = md.s[3] + e + aa
+		md.s[3] = md.s[4] + a + bb
+		md.s[4] = md.s[0] + b + cc
+		md.s[0] = dd
+
+		p = p[BlockSize:]
+		n += BlockSize
+	}
+	return n
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index a4bc948b76f04e68882935e75b44e1b97bd12347..303a94cd4ea2202037e94548ae97d02be07b3541 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -14,12 +14,36 @@
 			"revision": "3f8dd8687feddc1594655a2167e798d24ebaed1c",
 			"revisionTime": "2017-11-01T08:12:58Z"
 		},
+		{
+			"checksumSHA1": "usT4LCSQItkFvFOQT7cBlkCuGaE=",
+			"path": "github.com/beevik/etree",
+			"revision": "af219c0c7ea1b67ec263c0b1b1b96d284a9181ce",
+			"revisionTime": "2017-10-15T22:09:51Z"
+		},
 		{
 			"checksumSHA1": "spyv5/YFBjYyZLZa1U2LBfDR8PM=",
 			"path": "github.com/beorn7/perks/quantile",
 			"revision": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9",
 			"revisionTime": "2016-08-04T10:47:26Z"
 		},
+		{
+			"checksumSHA1": "8zBDp6vRYSBYxja1o+Vr3RpAq2U=",
+			"path": "github.com/crewjam/saml",
+			"revision": "5e89d545f563261a14cd84483eb5ff10233e208e",
+			"revisionTime": "2017-09-26T17:35:36Z"
+		},
+		{
+			"checksumSHA1": "lKNxxjwhZtBRpx6/uRQTL8EQ4Io=",
+			"path": "github.com/crewjam/saml/logger",
+			"revision": "5e89d545f563261a14cd84483eb5ff10233e208e",
+			"revisionTime": "2017-09-26T17:35:36Z"
+		},
+		{
+			"checksumSHA1": "OfLuV96DWYV71+47je/q5duOoEc=",
+			"path": "github.com/crewjam/saml/xmlenc",
+			"revision": "5e89d545f563261a14cd84483eb5ff10233e208e",
+			"revisionTime": "2017-09-26T17:35:36Z"
+		},
 		{
 			"checksumSHA1": "7DxViusFRJ7UPH0jZqYatwDrOkY=",
 			"path": "github.com/elazarl/go-bindata-assetfs",
@@ -62,6 +86,12 @@
 			"revision": "a3acf13e802c358d65f249324d14ed24aac11370",
 			"revisionTime": "2017-10-08T21:47:40Z"
 		},
+		{
+			"checksumSHA1": "SGc5vSs9tXhrGJ5ncymDyMvTg24=",
+			"path": "github.com/jonboulle/clockwork",
+			"revision": "bcac9884e7502bb2b474c0339d889cb981a2f27f",
+			"revisionTime": "2016-09-07T12:20:59Z"
+		},
 		{
 			"checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=",
 			"path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
@@ -135,6 +165,24 @@
 			"revision": "a6e9df898b1336106c743392c48ee0b71f5c4efa",
 			"revisionTime": "2017-10-17T21:40:25Z"
 		},
+		{
+			"checksumSHA1": "UXPsmvl8HiH8mrBHG1JyU+fGP4g=",
+			"path": "github.com/russellhaering/goxmldsig",
+			"revision": "b7efc6231e45b10bfd779852831c8bb59b350ec5",
+			"revisionTime": "2017-09-11T19:10:14Z"
+		},
+		{
+			"checksumSHA1": "p8URJMxNQPXJ7RaiZF/CqlwNMzw=",
+			"path": "github.com/russellhaering/goxmldsig/etreeutils",
+			"revision": "b7efc6231e45b10bfd779852831c8bb59b350ec5",
+			"revisionTime": "2017-09-11T19:10:14Z"
+		},
+		{
+			"checksumSHA1": "Q4UydcJq+umxYmfYCUJKT+emIKg=",
+			"path": "github.com/russellhaering/goxmldsig/types",
+			"revision": "b7efc6231e45b10bfd779852831c8bb59b350ec5",
+			"revisionTime": "2017-09-11T19:10:14Z"
+		},
 		{
 			"checksumSHA1": "zaSFO4G24r5ytya4wjN5UPZQ09Y=",
 			"path": "github.com/tstranex/u2f",
@@ -153,6 +201,12 @@
 			"revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce",
 			"revisionTime": "2017-10-23T14:45:55Z"
 		},
+		{
+			"checksumSHA1": "y/oIaxq2d3WPizRZfVjo8RCRYTU=",
+			"path": "golang.org/x/crypto/ripemd160",
+			"revision": "9419663f5a44be8b34ca85f08abc5fe1be11f8a3",
+			"revisionTime": "2017-09-30T17:45:11Z"
+		},
 		{
 			"checksumSHA1": "dp+OSc8jJIOd6h1mXoAmD3GGAAs=",
 			"path": "golang.org/x/sys/unix",