From 3f7867d83b00e59fc8d84135031433fd6bde3efe Mon Sep 17 00:00:00 2001
From: renovate <renovate-bot@autistici.org>
Date: Thu, 14 Jan 2021 11:21:11 +0000
Subject: [PATCH] Update module oschwald/maxminddb-golang to v0.2.0

---
 go.mod                                        |   2 +-
 go.sum                                        |   2 +
 .../oschwald/maxminddb-golang/.gitignore      |   4 -
 .../oschwald/maxminddb-golang/.gitmodules     |   2 +-
 .../oschwald/maxminddb-golang/.travis.yml     |  22 +-
 .../oschwald/maxminddb-golang/LICENSE         | 211 ++++++-
 .../oschwald/maxminddb-golang/README.md       |  12 +-
 .../oschwald/maxminddb-golang/appveyor.yml    |  19 -
 .../oschwald/maxminddb-golang/build.cmd       |   3 +
 .../oschwald/maxminddb-golang/decoder.go      | 515 ++++++------------
 .../oschwald/maxminddb-golang/errors.go       |  42 --
 .../oschwald/maxminddb-golang/mmap_unix.go    |  17 -
 .../oschwald/maxminddb-golang/reader.go       | 169 +++---
 .../maxminddb-golang/reader_appengine.go      |  26 -
 .../oschwald/maxminddb-golang/reader_other.go |  61 ---
 .../oschwald/maxminddb-golang/reader_unix.go  |  15 +
 .../{mmap_windows.go => reader_windows.go}    |  30 +-
 .../oschwald/maxminddb-golang/traverse.go     | 108 ----
 .../oschwald/maxminddb-golang/verifier.go     | 185 -------
 vendor/modules.txt                            |   2 +-
 20 files changed, 487 insertions(+), 960 deletions(-)
 delete mode 100644 vendor/github.com/oschwald/maxminddb-golang/.gitignore
 delete mode 100644 vendor/github.com/oschwald/maxminddb-golang/appveyor.yml
 create mode 100644 vendor/github.com/oschwald/maxminddb-golang/build.cmd
 delete mode 100644 vendor/github.com/oschwald/maxminddb-golang/errors.go
 delete mode 100644 vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
 delete mode 100644 vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
 delete mode 100644 vendor/github.com/oschwald/maxminddb-golang/reader_other.go
 create mode 100644 vendor/github.com/oschwald/maxminddb-golang/reader_unix.go
 rename vendor/github.com/oschwald/maxminddb-golang/{mmap_windows.go => reader_windows.go} (70%)
 delete mode 100644 vendor/github.com/oschwald/maxminddb-golang/traverse.go
 delete mode 100644 vendor/github.com/oschwald/maxminddb-golang/verifier.go

diff --git a/go.mod b/go.mod
index 06c4c38..3b7fefc 100644
--- a/go.mod
+++ b/go.mod
@@ -14,7 +14,7 @@ require (
 	github.com/jonboulle/clockwork v0.2.2 // indirect
 	github.com/mattermost/xml-roundtrip-validator v0.0.0-20201219040909-8fd2afad43d1 // indirect
 	github.com/mssola/user_agent v0.0.0-20170906152553-a2f39d5a9b15
-	github.com/oschwald/maxminddb-golang v0.0.0-20170901134056-26fe5ace1c70
+	github.com/oschwald/maxminddb-golang v0.2.0
 	github.com/prometheus/client_golang v1.9.0
 	github.com/rs/cors v0.0.0-20190613161432-33ffc0734c60
 	github.com/tstranex/u2f v1.0.0
diff --git a/go.sum b/go.sum
index acb9baa..c2ae492 100644
--- a/go.sum
+++ b/go.sum
@@ -272,6 +272,8 @@ github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYE
 github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
 github.com/oschwald/maxminddb-golang v0.0.0-20170901134056-26fe5ace1c70 h1:XGLYUmodtNzThosQ8GkMvj9TiIB/uWsP8NfxKSa3aDc=
 github.com/oschwald/maxminddb-golang v0.0.0-20170901134056-26fe5ace1c70/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY=
+github.com/oschwald/maxminddb-golang v0.2.0 h1:cdvE3VUWCRdu+tYIBtwbcPWj1A83jZc5CSdGSuhnqO8=
+github.com/oschwald/maxminddb-golang v0.2.0/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY=
 github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/patrickmn/go-cache v0.0.0-20180815053127-5633e0862627/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.gitignore b/vendor/github.com/oschwald/maxminddb-golang/.gitignore
deleted file mode 100644
index fe3fa4a..0000000
--- a/vendor/github.com/oschwald/maxminddb-golang/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.vscode
-*.out
-*.sw?
-*.test
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.gitmodules b/vendor/github.com/oschwald/maxminddb-golang/.gitmodules
index 400b2ab..51779cb 100644
--- a/vendor/github.com/oschwald/maxminddb-golang/.gitmodules
+++ b/vendor/github.com/oschwald/maxminddb-golang/.gitmodules
@@ -1,3 +1,3 @@
 [submodule "test-data"]
 	path = test-data
-	url = https://github.com/maxmind/MaxMind-DB.git
+	url = git://github.com/maxmind/MaxMind-DB.git
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.travis.yml b/vendor/github.com/oschwald/maxminddb-golang/.travis.yml
index 49c4478..ab1f959 100644
--- a/vendor/github.com/oschwald/maxminddb-golang/.travis.yml
+++ b/vendor/github.com/oschwald/maxminddb-golang/.travis.yml
@@ -1,23 +1,11 @@
 language: go
 
 go:
- - 1.4
- - 1.5
- - 1.6
- - 1.7
- - 1.8
+ - 1.1
+ - 1.2
+ - 1.3
+ - release
  - tip
 
-before_install:
-  - "if [[ $TRAVIS_GO_VERSION == 1.7 ]]; then go get -v github.com/golang/lint/golint; fi"
-
 install:
-  - go get -v -t ./...
-
-script:
-  - go test -race -cpu 1,4 -v
-  - go test -race -v -tags appengine
-  - "if [[ $TRAVIS_GO_VERSION == 1.7 ]]; then go vet ./...; fi"
-  - "if [[ $TRAVIS_GO_VERSION == 1.7 ]]; then golint .; fi"
-
-sudo: false
+  - go get launchpad.net/gocheck
diff --git a/vendor/github.com/oschwald/maxminddb-golang/LICENSE b/vendor/github.com/oschwald/maxminddb-golang/LICENSE
index 2969677..d645695 100644
--- a/vendor/github.com/oschwald/maxminddb-golang/LICENSE
+++ b/vendor/github.com/oschwald/maxminddb-golang/LICENSE
@@ -1,15 +1,202 @@
-ISC License
 
-Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
 
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/oschwald/maxminddb-golang/README.md b/vendor/github.com/oschwald/maxminddb-golang/README.md
index cdd6bd1..7efb9d1 100644
--- a/vendor/github.com/oschwald/maxminddb-golang/README.md
+++ b/vendor/github.com/oschwald/maxminddb-golang/README.md
@@ -1,14 +1,12 @@
 # MaxMind DB Reader for Go #
 
 [![Build Status](https://travis-ci.org/oschwald/maxminddb-golang.png?branch=master)](https://travis-ci.org/oschwald/maxminddb-golang)
-[![Windows Build Status](https://ci.appveyor.com/api/projects/status/4j2f9oep8nnfrmov/branch/master?svg=true)](https://ci.appveyor.com/project/oschwald/maxminddb-golang/branch/master)
 [![GoDoc](https://godoc.org/github.com/oschwald/maxminddb-golang?status.png)](https://godoc.org/github.com/oschwald/maxminddb-golang)
 
-This is a Go reader for the MaxMind DB format. Although this can be used to
-read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
-[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
-[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
-API for doing so.
+
+This is a Go reader for the MaxMind DB format. This can be used to read
+[GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
+[GeoIP2](http://www.maxmind.com/en/geolocation_landing) databases.
 
 This is not an official MaxMind API.
 
@@ -35,4 +33,4 @@ with your changes.
 
 ## License ##
 
-This is free software, licensed under the ISC License.
+This is free software, licensed under the Apache License, Version 2.0.
diff --git a/vendor/github.com/oschwald/maxminddb-golang/appveyor.yml b/vendor/github.com/oschwald/maxminddb-golang/appveyor.yml
deleted file mode 100644
index e2bb9dd..0000000
--- a/vendor/github.com/oschwald/maxminddb-golang/appveyor.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-version: "{build}"
-
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\oschwald\maxminddb-golang
-
-environment:
-  GOPATH: c:\gopath
-
-install:
-  - echo %PATH%
-  - echo %GOPATH%
-  - git submodule update --init --recursive
-  - go version
-  - go env
-  - go get -v -t ./...
-
-build_script:
-  - go test -v ./...
diff --git a/vendor/github.com/oschwald/maxminddb-golang/build.cmd b/vendor/github.com/oschwald/maxminddb-golang/build.cmd
new file mode 100644
index 0000000..f5802a2
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/build.cmd
@@ -0,0 +1,3 @@
+set GOPATH=%BuildFolder%
+go get -v ./...
+go test -v ./...
diff --git a/vendor/github.com/oschwald/maxminddb-golang/decoder.go b/vendor/github.com/oschwald/maxminddb-golang/decoder.go
index 396da75..7984629 100644
--- a/vendor/github.com/oschwald/maxminddb-golang/decoder.go
+++ b/vendor/github.com/oschwald/maxminddb-golang/decoder.go
@@ -1,15 +1,16 @@
 package maxminddb
 
 import (
+	"bytes"
 	"encoding/binary"
-	"math"
+	"fmt"
 	"math/big"
 	"reflect"
-	"sync"
 )
 
 type decoder struct {
-	buffer []byte
+	buffer      []byte
+	pointerBase uint
 }
 
 type dataType int
@@ -33,105 +34,59 @@ const (
 	_Float32
 )
 
-const (
-	// This is the value used in libmaxminddb
-	maximumDataStructureDepth = 512
-)
-
-func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
-	if depth > maximumDataStructureDepth {
-		return 0, newInvalidDatabaseError("exceeded maximum data structure depth; database is likely corrupt")
-	}
-	typeNum, size, newOffset, err := d.decodeCtrlData(offset)
-	if err != nil {
-		return 0, err
-	}
-
-	if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
-		result.Set(reflect.ValueOf(uintptr(offset)))
-		return d.nextValueOffset(offset, 1)
-	}
-	return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
+func (d *decoder) decode(offset uint, result reflect.Value) (uint, error) {
+	typeNum, size, newOffset := d.decodeCtrlData(offset)
+	return d.decodeFromType(typeNum, size, newOffset, result)
 }
 
-func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
+func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint) {
 	newOffset := offset + 1
-	if offset >= uint(len(d.buffer)) {
-		return 0, 0, 0, newOffsetError()
-	}
 	ctrlByte := d.buffer[offset]
 
 	typeNum := dataType(ctrlByte >> 5)
 	if typeNum == _Extended {
-		if newOffset >= uint(len(d.buffer)) {
-			return 0, 0, 0, newOffsetError()
-		}
 		typeNum = dataType(d.buffer[newOffset] + 7)
 		newOffset++
 	}
 
 	var size uint
-	size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
-	return typeNum, size, newOffset, err
+	size, newOffset = d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
+	return typeNum, size, newOffset
 }
 
-func (d *decoder) sizeFromCtrlByte(ctrlByte byte, offset uint, typeNum dataType) (uint, uint, error) {
+func (d *decoder) sizeFromCtrlByte(ctrlByte byte, offset uint, typeNum dataType) (uint, uint) {
 	size := uint(ctrlByte & 0x1f)
 	if typeNum == _Extended {
-		return size, offset, nil
+		return size, offset
 	}
 
 	var bytesToRead uint
-	if size < 29 {
-		return size, offset, nil
+	if size > 28 {
+		bytesToRead = size - 28
 	}
 
-	bytesToRead = size - 28
 	newOffset := offset + bytesToRead
-	if newOffset > uint(len(d.buffer)) {
-		return 0, 0, newOffsetError()
-	}
-	if size == 29 {
-		return 29 + uint(d.buffer[offset]), offset + 1, nil
-	}
-
 	sizeBytes := d.buffer[offset:newOffset]
 
 	switch {
+	case size == 29:
+		size = 29 + uint(sizeBytes[0])
 	case size == 30:
-		size = 285 + uintFromBytes(0, sizeBytes)
+		size = 285 + uint(uintFromBytes(0, sizeBytes))
 	case size > 30:
-		size = uintFromBytes(0, sizeBytes) + 65821
+		size = uint(uintFromBytes(0, sizeBytes)) + 65821
 	}
-	return size, newOffset, nil
+	return size, newOffset
 }
 
-func (d *decoder) decodeFromType(
-	dtype dataType,
-	size uint,
-	offset uint,
-	result reflect.Value,
-	depth int,
-) (uint, error) {
-	result = d.indirect(result)
+func (d *decoder) decodeFromType(dtype dataType, size uint, offset uint, result reflect.Value) (uint, error) {
+	if result.Kind() == reflect.Ptr {
+		result = reflect.Indirect(result)
+	}
 
-	// For these types, size has a special meaning
 	switch dtype {
 	case _Bool:
 		return d.unmarshalBool(size, offset, result)
-	case _Map:
-		return d.unmarshalMap(size, offset, result, depth)
-	case _Pointer:
-		return d.unmarshalPointer(size, offset, result, depth)
-	case _Slice:
-		return d.unmarshalSlice(size, offset, result, depth)
-	}
-
-	// For the remaining types, size is the byte size
-	if offset+size > uint(len(d.buffer)) {
-		return 0, newOffsetError()
-	}
-	switch dtype {
 	case _Bytes:
 		return d.unmarshalBytes(size, offset, result)
 	case _Float32:
@@ -140,6 +95,12 @@ func (d *decoder) decodeFromType(
 		return d.unmarshalFloat64(size, offset, result)
 	case _Int32:
 		return d.unmarshalInt32(size, offset, result)
+	case _Map:
+		return d.unmarshalMap(size, offset, result)
+	case _Pointer:
+		return d.unmarshalPointer(size, offset, result)
+	case _Slice:
+		return d.unmarshalSlice(size, offset, result)
 	case _String:
 		return d.unmarshalString(size, offset, result)
 	case _Uint16:
@@ -151,84 +112,51 @@ func (d *decoder) decodeFromType(
 	case _Uint128:
 		return d.unmarshalUint128(size, offset, result)
 	default:
-		return 0, newInvalidDatabaseError("unknown type: %d", dtype)
+		return 0, fmt.Errorf("unknown type: %d", dtype)
 	}
 }
 
 func (d *decoder) unmarshalBool(size uint, offset uint, result reflect.Value) (uint, error) {
 	if size > 1 {
-		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (bool size of %v)", size)
+		return 0, fmt.Errorf("the MaxMind DB file's data section contains bad data (bool size of %v)", size)
 	}
 	value, newOffset, err := d.decodeBool(size, offset)
 	if err != nil {
 		return 0, err
 	}
 	switch result.Kind() {
+	default:
+		return newOffset, fmt.Errorf("trying to unmarshal %v into %v", value, result.Type())
 	case reflect.Bool:
 		result.SetBool(value)
 		return newOffset, nil
 	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			result.Set(reflect.ValueOf(value))
-			return newOffset, nil
-		}
-	}
-	return newOffset, newUnmarshalTypeError(value, result.Type())
-}
-
-// indirect follows pointers and create values as necessary. This is
-// heavily based on encoding/json as my original version had a subtle
-// bug. This method should be considered to be licensed under
-// https://golang.org/LICENSE
-func (d *decoder) indirect(result reflect.Value) reflect.Value {
-	for {
-		// Load value from interface, but only if the result will be
-		// usefully addressable.
-		if result.Kind() == reflect.Interface && !result.IsNil() {
-			e := result.Elem()
-			if e.Kind() == reflect.Ptr && !e.IsNil() {
-				result = e
-				continue
-			}
-		}
-
-		if result.Kind() != reflect.Ptr {
-			break
-		}
-
-		if result.IsNil() {
-			result.Set(reflect.New(result.Type().Elem()))
-		}
-		result = result.Elem()
+		result.Set(reflect.ValueOf(value))
+		return newOffset, nil
 	}
-	return result
 }
 
-var sliceType = reflect.TypeOf([]byte{})
-
 func (d *decoder) unmarshalBytes(size uint, offset uint, result reflect.Value) (uint, error) {
+
 	value, newOffset, err := d.decodeBytes(size, offset)
 	if err != nil {
 		return 0, err
 	}
 	switch result.Kind() {
+	default:
+		return newOffset, fmt.Errorf("trying to unmarshal %v into %v", value, result.Type())
 	case reflect.Slice:
-		if result.Type() == sliceType {
-			result.SetBytes(value)
-			return newOffset, nil
-		}
+		result.SetBytes(value)
+		return newOffset, nil
 	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			result.Set(reflect.ValueOf(value))
-			return newOffset, nil
-		}
+		result.Set(reflect.ValueOf(value))
+		return newOffset, nil
 	}
-	return newOffset, newUnmarshalTypeError(value, result.Type())
 }
 
 func (d *decoder) unmarshalFloat32(size uint, offset uint, result reflect.Value) (uint, error) {
 	if size != 4 {
-		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float32 size of %v)", size)
+		return 0, fmt.Errorf("the MaxMind DB file's data section contains bad data (float32 size of %v)", size)
 	}
 	value, newOffset, err := d.decodeFloat32(size, offset)
 	if err != nil {
@@ -236,46 +164,41 @@ func (d *decoder) unmarshalFloat32(size uint, offset uint, result reflect.Value)
 	}
 
 	switch result.Kind() {
+	default:
+		return newOffset, fmt.Errorf("trying to unmarshal %v into %v", value, result.Type())
 	case reflect.Float32, reflect.Float64:
 		result.SetFloat(float64(value))
 		return newOffset, nil
 	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			result.Set(reflect.ValueOf(value))
-			return newOffset, nil
-		}
+		result.Set(reflect.ValueOf(value))
+		return newOffset, nil
 	}
-	return newOffset, newUnmarshalTypeError(value, result.Type())
 }
 
 func (d *decoder) unmarshalFloat64(size uint, offset uint, result reflect.Value) (uint, error) {
 
 	if size != 8 {
-		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float 64 size of %v)", size)
+		return 0, fmt.Errorf("the MaxMind DB file's data section contains bad data (float 64 size of %v)", size)
 	}
 	value, newOffset, err := d.decodeFloat64(size, offset)
 	if err != nil {
 		return 0, err
 	}
 	switch result.Kind() {
+	default:
+		return newOffset, fmt.Errorf("trying to unmarshal %v into %v", value, result.Type())
 	case reflect.Float32, reflect.Float64:
-		if result.OverflowFloat(value) {
-			return 0, newUnmarshalTypeError(value, result.Type())
-		}
 		result.SetFloat(value)
 		return newOffset, nil
 	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			result.Set(reflect.ValueOf(value))
-			return newOffset, nil
-		}
+		result.Set(reflect.ValueOf(value))
+		return newOffset, nil
 	}
-	return newOffset, newUnmarshalTypeError(value, result.Type())
 }
 
 func (d *decoder) unmarshalInt32(size uint, offset uint, result reflect.Value) (uint, error) {
 	if size > 4 {
-		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (int32 size of %v)", size)
+		return 0, fmt.Errorf("the MaxMind DB file's data section contains bad data (int32 size of %v)", size)
 	}
 	value, newOffset, err := d.decodeInt(size, offset)
 	if err != nil {
@@ -283,105 +206,77 @@ func (d *decoder) unmarshalInt32(size uint, offset uint, result reflect.Value) (
 	}
 
 	switch result.Kind() {
+	default:
+		return newOffset, fmt.Errorf("trying to unmarshal %v into %v", value, result.Type())
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		n := int64(value)
-		if !result.OverflowInt(n) {
-			result.SetInt(n)
-			return newOffset, nil
-		}
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		n := uint64(value)
-		if !result.OverflowUint(n) {
-			result.SetUint(n)
-			return newOffset, nil
-		}
+		result.SetInt(int64(value))
+		return newOffset, nil
 	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			result.Set(reflect.ValueOf(value))
-			return newOffset, nil
-		}
+		result.Set(reflect.ValueOf(value))
+		return newOffset, nil
 	}
-	return newOffset, newUnmarshalTypeError(value, result.Type())
 }
 
-func (d *decoder) unmarshalMap(
-	size uint,
-	offset uint,
-	result reflect.Value,
-	depth int,
-) (uint, error) {
-	result = d.indirect(result)
+func (d *decoder) unmarshalMap(size uint, offset uint, result reflect.Value) (uint, error) {
 	switch result.Kind() {
 	default:
-		return 0, newUnmarshalTypeError("map", result.Type())
+		return 0, fmt.Errorf("trying to unmarshal a map into %v", result.Type())
 	case reflect.Struct:
-		return d.decodeStruct(size, offset, result, depth)
+		return d.decodeStruct(size, offset, result)
 	case reflect.Map:
-		return d.decodeMap(size, offset, result, depth)
+		return d.decodeMap(size, offset, result)
 	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			rv := reflect.ValueOf(make(map[string]interface{}, size))
-			newOffset, err := d.decodeMap(size, offset, rv, depth)
-			result.Set(rv)
-			return newOffset, err
-		}
-		return 0, newUnmarshalTypeError("map", result.Type())
+		rv := reflect.ValueOf(make(map[string]interface{}, size))
+		newOffset, err := d.decodeMap(size, offset, rv)
+		result.Set(rv)
+		return newOffset, err
 	}
 }
 
-func (d *decoder) unmarshalPointer(size uint, offset uint, result reflect.Value, depth int) (uint, error) {
-	pointer, newOffset, err := d.decodePointer(size, offset)
-	if err != nil {
-		return 0, err
-	}
-	_, err = d.decode(pointer, result, depth)
+func (d *decoder) unmarshalPointer(size uint, offset uint, result reflect.Value) (uint, error) {
+	pointer, newOffset := d.decodePointer(size, offset)
+	_, err := d.decode(pointer, result)
 	return newOffset, err
 }
 
-func (d *decoder) unmarshalSlice(
-	size uint,
-	offset uint,
-	result reflect.Value,
-	depth int,
-) (uint, error) {
+func (d *decoder) unmarshalSlice(size uint, offset uint, result reflect.Value) (uint, error) {
+
 	switch result.Kind() {
+	default:
+		return 0, fmt.Errorf("trying to unmarshal an array into %v", result.Type())
 	case reflect.Slice:
-		return d.decodeSlice(size, offset, result, depth)
+		return d.decodeSlice(size, offset, result)
 	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			a := []interface{}{}
-			rv := reflect.ValueOf(&a).Elem()
-			newOffset, err := d.decodeSlice(size, offset, rv, depth)
-			result.Set(rv)
-			return newOffset, err
-		}
+		a := []interface{}{}
+		rv := reflect.ValueOf(&a).Elem()
+		newOffset, err := d.decodeSlice(size, offset, rv)
+		result.Set(rv)
+		return newOffset, err
 	}
-	return 0, newUnmarshalTypeError("array", result.Type())
 }
 
 func (d *decoder) unmarshalString(size uint, offset uint, result reflect.Value) (uint, error) {
+
 	value, newOffset, err := d.decodeString(size, offset)
 
 	if err != nil {
 		return 0, err
 	}
 	switch result.Kind() {
+	default:
+		return newOffset, fmt.Errorf("trying to unmarshal %v into %v", value, result.Type())
 	case reflect.String:
 		result.SetString(value)
 		return newOffset, nil
 	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			result.Set(reflect.ValueOf(value))
-			return newOffset, nil
-		}
+		result.Set(reflect.ValueOf(value))
+		return newOffset, nil
 	}
-	return newOffset, newUnmarshalTypeError(value, result.Type())
-
 }
 
 func (d *decoder) unmarshalUint(size uint, offset uint, result reflect.Value, uintType uint) (uint, error) {
 	if size > uintType/8 {
-		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint%v size of %v)", uintType, size)
+		return 0, fmt.Errorf("the MaxMind DB file's data section contains bad data (uint%v size of %v)", uintType, size)
 	}
 
 	value, newOffset, err := d.decodeUint(size, offset)
@@ -390,50 +285,38 @@ func (d *decoder) unmarshalUint(size uint, offset uint, result reflect.Value, ui
 	}
 
 	switch result.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		n := int64(value)
-		if !result.OverflowInt(n) {
-			result.SetInt(n)
-			return newOffset, nil
-		}
+	default:
+		return newOffset, fmt.Errorf("trying to unmarshal %v into %v", value, result.Type())
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		if !result.OverflowUint(value) {
-			result.SetUint(value)
-			return newOffset, nil
-		}
+		result.SetUint(value)
+		return newOffset, nil
 	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			result.Set(reflect.ValueOf(value))
-			return newOffset, nil
-		}
+		result.Set(reflect.ValueOf(value))
+		return newOffset, nil
 	}
-	return newOffset, newUnmarshalTypeError(value, result.Type())
 }
 
-var bigIntType = reflect.TypeOf(big.Int{})
-
 func (d *decoder) unmarshalUint128(size uint, offset uint, result reflect.Value) (uint, error) {
 	if size > 16 {
-		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint128 size of %v)", size)
+		return 0, fmt.Errorf("the MaxMind DB file's data section contains bad data (uint128 size of %v)", size)
 	}
 	value, newOffset, err := d.decodeUint128(size, offset)
 	if err != nil {
 		return 0, err
 	}
 
+	// XXX - this should allow *big.Int rather than just bigInt
+	// Currently this is reported as invalid
 	switch result.Kind() {
+	default:
+		return newOffset, fmt.Errorf("trying to unmarshal %v into %v", value, result.Type())
 	case reflect.Struct:
-		if result.Type() == bigIntType {
-			result.Set(reflect.ValueOf(*value))
-			return newOffset, nil
-		}
-	case reflect.Interface:
-		if result.NumMethod() == 0 {
-			result.Set(reflect.ValueOf(value))
-			return newOffset, nil
-		}
+		result.Set(reflect.ValueOf(*value))
+		return newOffset, nil
+	case reflect.Interface, reflect.Ptr:
+		result.Set(reflect.ValueOf(value))
+		return newOffset, nil
 	}
-	return newOffset, newUnmarshalTypeError(value, result.Type())
 }
 
 func (d *decoder) decodeBool(size uint, offset uint) (bool, uint, error) {
@@ -449,71 +332,67 @@ func (d *decoder) decodeBytes(size uint, offset uint) ([]byte, uint, error) {
 
 func (d *decoder) decodeFloat64(size uint, offset uint) (float64, uint, error) {
 	newOffset := offset + size
-	bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
-	return math.Float64frombits(bits), newOffset, nil
+	var dbl float64
+	binary.Read(bytes.NewBuffer(d.buffer[offset:newOffset]), binary.BigEndian, &dbl)
+	return dbl, newOffset, nil
 }
 
 func (d *decoder) decodeFloat32(size uint, offset uint) (float32, uint, error) {
 	newOffset := offset + size
-	bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
-	return math.Float32frombits(bits), newOffset, nil
+	var flt float32
+	binary.Read(bytes.NewBuffer(d.buffer[offset:newOffset]), binary.BigEndian, &flt)
+	return flt, newOffset, nil
 }
 
 func (d *decoder) decodeInt(size uint, offset uint) (int, uint, error) {
 	newOffset := offset + size
-	var val int32
-	for _, b := range d.buffer[offset:newOffset] {
-		val = (val << 8) | int32(b)
+	intBytes := d.buffer[offset:newOffset]
+	if size != 4 {
+		pad := make([]byte, 4-size)
+		intBytes = append(pad, intBytes...)
 	}
+
+	var val int32
+	binary.Read(bytes.NewBuffer(intBytes), binary.BigEndian, &val)
+
 	return int(val), newOffset, nil
 }
 
-func (d *decoder) decodeMap(
-	size uint,
-	offset uint,
-	result reflect.Value,
-	depth int,
-) (uint, error) {
+func (d *decoder) decodeMap(size uint, offset uint, result reflect.Value) (uint, error) {
 	if result.IsNil() {
 		result.Set(reflect.MakeMap(result.Type()))
 	}
 
 	for i := uint(0); i < size; i++ {
-		var key []byte
+		var key string
 		var err error
-		key, offset, err = d.decodeKey(offset)
+		key, offset, err = d.decodeKeyString(offset)
 
 		if err != nil {
 			return 0, err
 		}
 
 		value := reflect.New(result.Type().Elem())
-		offset, err = d.decode(offset, value, depth)
+		offset, err = d.decode(offset, value)
 		if err != nil {
 			return 0, err
 		}
-		result.SetMapIndex(reflect.ValueOf(string(key)), value.Elem())
+		result.SetMapIndex(reflect.ValueOf(key), value.Elem())
 	}
 	return offset, nil
 }
 
-func (d *decoder) decodePointer(
-	size uint,
-	offset uint,
-) (uint, uint, error) {
+func (d *decoder) decodePointer(size uint, offset uint) (uint, uint) {
 	pointerSize := ((size >> 3) & 0x3) + 1
 	newOffset := offset + pointerSize
-	if newOffset > uint(len(d.buffer)) {
-		return 0, 0, newOffsetError()
-	}
 	pointerBytes := d.buffer[offset:newOffset]
-	var prefix uint
+	var prefix uint64
 	if pointerSize == 4 {
 		prefix = 0
 	} else {
-		prefix = uint(size & 0x7)
+		prefix = uint64(size & 0x7)
 	}
-	unpacked := uintFromBytes(prefix, pointerBytes)
+	unpacked := uint(uintFromBytes(prefix, pointerBytes))
 
 	var pointerValueOffset uint
 	switch pointerSize {
@@ -527,21 +406,16 @@ func (d *decoder) decodePointer(
 		pointerValueOffset = 0
 	}
 
-	pointer := unpacked + pointerValueOffset
+	pointer := unpacked + d.pointerBase + pointerValueOffset
 
-	return pointer, newOffset, nil
+	return pointer, newOffset
 }
 
-func (d *decoder) decodeSlice(
-	size uint,
-	offset uint,
-	result reflect.Value,
-	depth int,
-) (uint, error) {
+func (d *decoder) decodeSlice(size uint, offset uint, result reflect.Value) (uint, error) {
 	result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
 	for i := 0; i < int(size); i++ {
 		var err error
-		offset, err = d.decode(offset, result.Index(i), depth)
+		offset, err = d.decode(offset, result.Index(i))
 		if err != nil {
 			return 0, err
 		}
@@ -554,83 +428,36 @@ func (d *decoder) decodeString(size uint, offset uint) (string, uint, error) {
 	return string(d.buffer[offset:newOffset]), newOffset, nil
 }
 
-type fieldsType struct {
-	namedFields     map[string]int
-	anonymousFields []int
-}
-
-var (
-	fieldMap   = map[reflect.Type]*fieldsType{}
-	fieldMapMu sync.RWMutex
-)
-
-func (d *decoder) decodeStruct(
-	size uint,
-	offset uint,
-	result reflect.Value,
-	depth int,
-) (uint, error) {
+func (d *decoder) decodeStruct(size uint, offset uint, result reflect.Value) (uint, error) {
 	resultType := result.Type()
+	numFields := resultType.NumField()
 
-	fieldMapMu.RLock()
-	fields, ok := fieldMap[resultType]
-	fieldMapMu.RUnlock()
-	if !ok {
-		numFields := resultType.NumField()
-		namedFields := make(map[string]int, numFields)
-		var anonymous []int
-		for i := 0; i < numFields; i++ {
-			field := resultType.Field(i)
-
-			fieldName := field.Name
-			if tag := field.Tag.Get("maxminddb"); tag != "" {
-				if tag == "-" {
-					continue
-				}
-				fieldName = tag
-			}
-			if field.Anonymous {
-				anonymous = append(anonymous, i)
-				continue
-			}
-			namedFields[fieldName] = i
-		}
-		fieldMapMu.Lock()
-		fields = &fieldsType{namedFields, anonymous}
-		fieldMap[resultType] = fields
-		fieldMapMu.Unlock()
-	}
+	fields := make(map[string]reflect.Value, numFields)
+	for i := 0; i < numFields; i++ {
+		fieldType := resultType.Field(i)
 
-	// This fills in embedded structs
-	for i := range fields.anonymousFields {
-		_, err := d.unmarshalMap(size, offset, result.Field(i), depth)
-		if err != nil {
-			return 0, err
+		fieldName := fieldType.Name
+		tag := fieldType.Tag.Get("maxminddb")
+		if tag != "" {
+			fieldName = tag
 		}
+		fields[fieldName] = result.Field(i)
 	}
 
-	// This handles named fields
 	for i := uint(0); i < size; i++ {
-		var (
-			err error
-			key []byte
-		)
-		key, offset, err = d.decodeKey(offset)
+
+		var key string
+		var err error
+		key, offset, err = d.decodeKeyString(offset)
 		if err != nil {
 			return 0, err
 		}
-		// The string() does not create a copy due to this compiler
-		// optimization: https://github.com/golang/go/issues/3512
-		j, ok := fields.namedFields[string(key)]
+		field, ok := fields[key]
 		if !ok {
-			offset, err = d.nextValueOffset(offset, 1)
-			if err != nil {
-				return 0, err
-			}
+			offset = d.nextValueOffset(offset, 1)
 			continue
 		}
-
-		offset, err = d.decode(offset, result.Field(j), depth)
+		offset, err = d.decode(offset, field)
 		if err != nil {
 			return 0, err
 		}
@@ -640,12 +467,8 @@ func (d *decoder) decodeStruct(
 
 func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint, error) {
 	newOffset := offset + size
-	bytes := d.buffer[offset:newOffset]
+	val := uintFromBytes(0, d.buffer[offset:newOffset])
 
-	var val uint64
-	for _, b := range bytes {
-		val = (val << 8) | uint64(b)
-	}
 	return val, newOffset, nil
 }
 
@@ -657,58 +480,38 @@ func (d *decoder) decodeUint128(size uint, offset uint) (*big.Int, uint, error)
 	return val, newOffset, nil
 }
 
-func uintFromBytes(prefix uint, uintBytes []byte) uint {
+func uintFromBytes(prefix uint64, uintBytes []byte) uint64 {
 	val := prefix
 	for _, b := range uintBytes {
-		val = (val << 8) | uint(b)
+		val = (val << 8) | uint64(b)
 	}
 	return val
 }
 
-// decodeKey decodes a map key into []byte slice. We use a []byte so that we
-// can take advantage of https://github.com/golang/go/issues/3512 to avoid
-// copying the bytes when decoding a struct. Previously, we achieved this by
-// using unsafe.
-func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
-	typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
-	if err != nil {
-		return nil, 0, err
-	}
+func (d *decoder) decodeKeyString(offset uint) (string, uint, error) {
+	typeNum, size, newOffset := d.decodeCtrlData(offset)
 	if typeNum == _Pointer {
-		pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
-		if err != nil {
-			return nil, 0, err
-		}
-		key, _, err := d.decodeKey(pointer)
+		pointer, ptrOffset := d.decodePointer(size, newOffset)
+		key, _, err := d.decodeKeyString(pointer)
 		return key, ptrOffset, err
 	}
 	if typeNum != _String {
-		return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
+		return "", 0, fmt.Errorf("unexpected type when decoding string: %v", typeNum)
 	}
-	newOffset := dataOffset + size
-	if newOffset > uint(len(d.buffer)) {
-		return nil, 0, newOffsetError()
-	}
-	return d.buffer[dataOffset:newOffset], newOffset, nil
+	return d.decodeString(size, newOffset)
 }
 
 // This function is used to skip ahead to the next value without decoding
 // the one at the offset passed in. The size bits have different meanings for
 // different data types
-func (d *decoder) nextValueOffset(offset uint, numberToSkip uint) (uint, error) {
+func (d *decoder) nextValueOffset(offset uint, numberToSkip uint) uint {
 	if numberToSkip == 0 {
-		return offset, nil
-	}
-	typeNum, size, offset, err := d.decodeCtrlData(offset)
-	if err != nil {
-		return 0, err
+		return offset
 	}
+	typeNum, size, offset := d.decodeCtrlData(offset)
 	switch typeNum {
 	case _Pointer:
-		_, offset, err = d.decodePointer(size, offset)
-		if err != nil {
-			return 0, err
-		}
+		_, offset = d.decodePointer(size, offset)
 	case _Map:
 		numberToSkip += 2 * size
 	case _Slice:
diff --git a/vendor/github.com/oschwald/maxminddb-golang/errors.go b/vendor/github.com/oschwald/maxminddb-golang/errors.go
deleted file mode 100644
index 1327800..0000000
--- a/vendor/github.com/oschwald/maxminddb-golang/errors.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package maxminddb
-
-import (
-	"fmt"
-	"reflect"
-)
-
-// InvalidDatabaseError is returned when the database contains invalid data
-// and cannot be parsed.
-type InvalidDatabaseError struct {
-	message string
-}
-
-func newOffsetError() InvalidDatabaseError {
-	return InvalidDatabaseError{"unexpected end of database"}
-}
-
-func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError {
-	return InvalidDatabaseError{fmt.Sprintf(format, args...)}
-}
-
-func (e InvalidDatabaseError) Error() string {
-	return e.message
-}
-
-// UnmarshalTypeError is returned when the value in the database cannot be
-// assigned to the specified data type.
-type UnmarshalTypeError struct {
-	Value string       // stringified copy of the database value that caused the error
-	Type  reflect.Type // type of the value that could not be assign to
-}
-
-func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError {
-	return UnmarshalTypeError{
-		Value: fmt.Sprintf("%v", value),
-		Type:  rType,
-	}
-}
-
-func (e UnmarshalTypeError) Error() string {
-	return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String())
-}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go b/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
deleted file mode 100644
index 99f98ca..0000000
--- a/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !windows,!appengine
-
-package maxminddb
-
-import (
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-func mmap(fd int, length int) (data []byte, err error) {
-	return unix.Mmap(fd, 0, length, syscall.PROT_READ, syscall.MAP_SHARED)
-}
-
-func munmap(b []byte) (err error) {
-	return unix.Munmap(b)
-}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader.go b/vendor/github.com/oschwald/maxminddb-golang/reader.go
index bc933e9..984440b 100644
--- a/vendor/github.com/oschwald/maxminddb-golang/reader.go
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader.go
@@ -5,16 +5,11 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"os"
 	"reflect"
 )
 
-const (
-	// NotFound is returned by LookupOffset when a matched root record offset
-	// cannot be found.
-	NotFound = ^uintptr(0)
-
-	dataSectionSeparatorSize = 16
-)
+const dataSectionSeparatorSize = 16
 
 var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")
 
@@ -44,42 +39,62 @@ type Metadata struct {
 	RecordSize               uint              `maxminddb:"record_size"`
 }
 
+// Open takes a string path to a MaxMind DB file and returns a Reader
+// structure or an error. The database file is opened using a memory map. Use
+// the Close method on the Reader object to return the resources to the
+// system.
+func Open(file string) (*Reader, error) {
+	mapFile, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer mapFile.Close()
+
+	stats, err := mapFile.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	fileSize := int(stats.Size())
+	mmap, err := mmap(int(mapFile.Fd()), fileSize)
+	if err != nil {
+		return nil, err
+	}
+
+	reader, err := FromBytes(mmap)
+	if err != nil {
+		munmap(mmap)
+		return nil, err
+	}
+
+	reader.hasMappedFile = true
+	return reader, nil
+}
+
 // FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
 // a Reader structure or an error.
 func FromBytes(buffer []byte) (*Reader, error) {
 	metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
 
 	if metadataStart == -1 {
-		return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
+		return nil, fmt.Errorf("error opening database file: invalid MaxMind DB file")
 	}
 
 	metadataStart += len(metadataStartMarker)
-	metadataDecoder := decoder{buffer[metadataStart:]}
+	metadataDecoder := decoder{buffer, uint(metadataStart)}
 
 	var metadata Metadata
 
 	rvMetdata := reflect.ValueOf(&metadata)
-	_, err := metadataDecoder.decode(0, rvMetdata, 0)
+	_, err := metadataDecoder.decode(uint(metadataStart), rvMetdata)
 	if err != nil {
 		return nil, err
 	}
 
 	searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
-	dataSectionStart := searchTreeSize + dataSectionSeparatorSize
-	dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
-	if dataSectionStart > dataSectionEnd {
-		return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
-	}
-	d := decoder{
-		buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
-	}
+	decoder := decoder{buffer, searchTreeSize + dataSectionSeparatorSize}
 
-	reader := &Reader{
-		buffer:    buffer,
-		decoder:   d,
-		Metadata:  metadata,
-		ipv4Start: 0,
-	}
+	reader := &Reader{buffer: buffer, decoder: decoder, Metadata: metadata, ipv4Start: 0}
 
 	reader.ipv4Start, err = reader.startNode()
 
@@ -105,57 +120,22 @@ func (r *Reader) startNode() (uint, error) {
 }
 
 // Lookup takes an IP address as a net.IP structure and a pointer to the
-// result value to Decode into.
-func (r *Reader) Lookup(ipAddress net.IP, result interface{}) error {
-	pointer, err := r.lookupPointer(ipAddress)
-	if pointer == 0 || err != nil {
-		return err
-	}
-	return r.retrieveData(pointer, result)
-}
-
-// LookupOffset maps an argument net.IP to a corresponding record offset in the
-// database. NotFound is returned if no such record is found, and a record may
-// otherwise be extracted by passing the returned offset to Decode. LookupOffset
-// is an advanced API, which exists to provide clients with a means to cache
-// previously-decoded records.
-func (r *Reader) LookupOffset(ipAddress net.IP) (uintptr, error) {
-	pointer, err := r.lookupPointer(ipAddress)
-	if pointer == 0 || err != nil {
-		return NotFound, err
-	}
-	return r.resolveDataPointer(pointer)
-}
-
-// Decode the record at |offset| into |result|. The result value pointed to
-// must be a data value that corresponds to a record in the database. This may
-// include a struct representation of the data, a map capable of holding the
-// data or an empty interface{} value.
+// result value to decode into. The result value pointed to must be a data
+// value that corresponds to a record in the database. This may include a
+// struct representation of the data, a map capable of holding the data or an
+// empty interface{} value.
 //
 // If result is a pointer to a struct, the struct need not include a field
 // for every value that may be in the database. If a field is not present in
 // the structure, the decoder will not decode that field, reducing the time
 // required to decode the record.
 //
-// As a special case, a struct field of type uintptr will be used to capture
-// the offset of the value. Decode may later be used to extract the stored
-// value from the offset. MaxMind DBs are highly normalized: for example in
-// the City database, all records of the same country will reference a
-// single representative record for that country. This uintptr behavior allows
-// clients to leverage this normalization in their own sub-record caching.
-func (r *Reader) Decode(offset uintptr, result interface{}) error {
-	rv := reflect.ValueOf(result)
-	if rv.Kind() != reflect.Ptr || rv.IsNil() {
-		return errors.New("result param must be a pointer")
-	}
-
-	_, err := r.decoder.decode(uint(offset), reflect.ValueOf(result), 0)
-	return err
-}
-
-func (r *Reader) lookupPointer(ipAddress net.IP) (uint, error) {
+// Currently the decoder expect most data types to correspond exactly (e.g.,
+// a uint64 database type must be decoded into a uint64 Go type). In the
+// future, this may be made more flexible.
+func (r *Reader) Lookup(ipAddress net.IP, result interface{}) error {
 	if ipAddress == nil {
-		return 0, errors.New("ipAddress passed to Lookup cannot be nil")
+		return errors.New("ipAddress passed to Lookup cannot be nil")
 	}
 
 	ipV4Address := ipAddress.To4()
@@ -163,10 +143,20 @@ func (r *Reader) lookupPointer(ipAddress net.IP) (uint, error) {
 		ipAddress = ipV4Address
 	}
 	if len(ipAddress) == 16 && r.Metadata.IPVersion == 4 {
-		return 0, fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ipAddress.String())
+		return fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ipAddress.String())
+	}
+
+	pointer, err := r.findAddressInTree(ipAddress)
+
+	if pointer == 0 {
+		return err
 	}
 
-	return r.findAddressInTree(ipAddress)
+	rv := reflect.ValueOf(result)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return errors.New("result param for Lookup must be a pointer")
+	}
+	return r.resolveDataPointer(pointer, rv)
 }
 
 func (r *Reader) findAddressInTree(ipAddress net.IP) (uint, error) {
@@ -196,7 +186,7 @@ func (r *Reader) findAddressInTree(ipAddress net.IP) (uint, error) {
 		return node, nil
 	}
 
-	return 0, newInvalidDatabaseError("invalid node in search tree")
+	return 0, errors.New("invalid node in search tree")
 }
 
 func (r *Reader) readNode(nodeNumber uint, index uint) (uint, error) {
@@ -205,13 +195,13 @@ func (r *Reader) readNode(nodeNumber uint, index uint) (uint, error) {
 	baseOffset := nodeNumber * RecordSize / 4
 
 	var nodeBytes []byte
-	var prefix uint
+	var prefix uint64
 	switch RecordSize {
 	case 24:
 		offset := baseOffset + index*3
 		nodeBytes = r.buffer[offset : offset+3]
 	case 28:
-		prefix = uint(r.buffer[baseOffset+3])
+		prefix = uint64(r.buffer[baseOffset+3])
 		if index != 0 {
 			prefix &= 0x0F
 		} else {
@@ -223,24 +213,31 @@ func (r *Reader) readNode(nodeNumber uint, index uint) (uint, error) {
 		offset := baseOffset + index*4
 		nodeBytes = r.buffer[offset : offset+4]
 	default:
-		return 0, newInvalidDatabaseError("unknown record size: %d", RecordSize)
+		return 0, fmt.Errorf("unknown record size: %d", RecordSize)
 	}
-	return uintFromBytes(prefix, nodeBytes), nil
+	return uint(uintFromBytes(prefix, nodeBytes)), nil
 }
 
-func (r *Reader) retrieveData(pointer uint, result interface{}) error {
-	offset, err := r.resolveDataPointer(pointer)
-	if err != nil {
-		return err
+func (r *Reader) resolveDataPointer(pointer uint, result reflect.Value) error {
+	nodeCount := r.Metadata.NodeCount
+	searchTreeSize := r.Metadata.RecordSize * nodeCount / 4
+
+	resolved := pointer - nodeCount + searchTreeSize
+
+	if resolved > uint(len(r.buffer)) {
+		return errors.New("the MaxMind DB file's search tree is corrupt")
 	}
-	return r.Decode(offset, result)
-}
 
-func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
-	var resolved = uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)
+	_, err := r.decoder.decode(resolved, result)
+	return err
+}
 
-	if resolved > uintptr(len(r.buffer)) {
-		return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
+// Close unmaps the database file from virtual memory and returns the
+// resources to the system. If called on a Reader opened using FromBytes,
+// this method does nothing.
+func (r *Reader) Close() {
+	if r.hasMappedFile {
+		munmap(r.buffer)
+		r.hasMappedFile = false
 	}
-	return resolved, nil
 }
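
A minimal usage sketch against the pinned v0.2.0 API, assuming maxminddb.Open remains the entry point and using an illustrative database path and IP address. Lookup now decodes directly into a caller-supplied pointer and Close returns nothing; because this older decoder expects database and Go types to correspond closely, the sketch decodes into an empty interface rather than a tagged struct:

	package main

	import (
		"fmt"
		"log"
		"net"

		"github.com/oschwald/maxminddb-golang"
	)

	func main() {
		// Path is illustrative only.
		db, err := maxminddb.Open("GeoLite2-Country.mmdb")
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close() // Close no longer returns an error in this version.

		// Decode into an empty interface; struct-mapping rules for this
		// older decoder are not assumed here.
		var record interface{}
		if err := db.Lookup(net.ParseIP("81.2.69.142"), &record); err != nil {
			log.Fatal(err)
		}
		fmt.Println(record)
	}
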
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go b/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
deleted file mode 100644
index 631e195..0000000
--- a/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build appengine
-
-package maxminddb
-
-import "io/ioutil"
-
-// Open takes a string path to a MaxMind DB file and returns a Reader
-// structure or an error. The database file is opened using a memory map,
-// except on Google App Engine where mmap is not supported; there the database
-// is loaded into memory. Use the Close method on the Reader object to return
-// the resources to the system.
-func Open(file string) (*Reader, error) {
-	bytes, err := ioutil.ReadFile(file)
-	if err != nil {
-		return nil, err
-	}
-
-	return FromBytes(bytes)
-}
-
-// Close unmaps the database file from virtual memory and returns the
-// resources to the system. If called on a Reader opened using FromBytes
-// or Open on Google App Engine, this method does nothing.
-func (r *Reader) Close() error {
-	return nil
-}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_other.go b/vendor/github.com/oschwald/maxminddb-golang/reader_other.go
deleted file mode 100644
index b611a95..0000000
--- a/vendor/github.com/oschwald/maxminddb-golang/reader_other.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// +build !appengine
-
-package maxminddb
-
-import (
-	"os"
-	"runtime"
-)
-
-// Open takes a string path to a MaxMind DB file and returns a Reader
-// structure or an error. The database file is opened using a memory map,
-// except on Google App Engine where mmap is not supported; there the database
-// is loaded into memory. Use the Close method on the Reader object to return
-// the resources to the system.
-func Open(file string) (*Reader, error) {
-	mapFile, err := os.Open(file)
-	if err != nil {
-		return nil, err
-	}
-	defer func() {
-		if rerr := mapFile.Close(); rerr != nil {
-			err = rerr
-		}
-	}()
-
-	stats, err := mapFile.Stat()
-	if err != nil {
-		return nil, err
-	}
-
-	fileSize := int(stats.Size())
-	mmap, err := mmap(int(mapFile.Fd()), fileSize)
-	if err != nil {
-		return nil, err
-	}
-
-	reader, err := FromBytes(mmap)
-	if err != nil {
-		if err2 := munmap(mmap); err2 != nil {
-			// failing to unmap the file is probably the more severe error
-			return nil, err2
-		}
-		return nil, err
-	}
-
-	reader.hasMappedFile = true
-	runtime.SetFinalizer(reader, (*Reader).Close)
-	return reader, err
-}
-
-// Close unmaps the database file from virtual memory and returns the
-// resources to the system. If called on a Reader opened using FromBytes
-// or Open on Google App Engine, this method does nothing.
-func (r *Reader) Close() error {
-	if !r.hasMappedFile {
-		return nil
-	}
-	runtime.SetFinalizer(r, nil)
-	r.hasMappedFile = false
-	return munmap(r.buffer)
-}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_unix.go b/vendor/github.com/oschwald/maxminddb-golang/reader_unix.go
new file mode 100644
index 0000000..e6ba9fd
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader_unix.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package maxminddb
+
+import (
+	"syscall"
+)
+
+func mmap(fd int, length int) (data []byte, err error) {
+	return syscall.Mmap(fd, 0, length, syscall.PROT_READ, syscall.MAP_SHARED)
+}
+
+func munmap(b []byte) (err error) {
+	return syscall.Munmap(b)
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go b/vendor/github.com/oschwald/maxminddb-golang/reader_windows.go
similarity index 70%
rename from vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
rename to vendor/github.com/oschwald/maxminddb-golang/reader_windows.go
index 661250e..b8e2466 100644
--- a/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader_windows.go
@@ -1,37 +1,33 @@
-// +build windows,!appengine
-
-package maxminddb
-
 // Windows support largely borrowed from mmap-go.
 //
 // Copyright 2011 Evan Shaw. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
+package maxminddb
 
 import (
 	"errors"
 	"os"
 	"reflect"
 	"sync"
+	"syscall"
 	"unsafe"
-
-	"golang.org/x/sys/windows"
 )
 
-type memoryMap []byte
+type MMap []byte
 
 // Windows
 var handleLock sync.Mutex
-var handleMap = map[uintptr]windows.Handle{}
+var handleMap = map[uintptr]syscall.Handle{}
 
 func mmap(fd int, length int) (data []byte, err error) {
-	h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
-		uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
+	h, errno := syscall.CreateFileMapping(syscall.Handle(fd), nil,
+		uint32(syscall.PAGE_READONLY), 0, uint32(length), nil)
 	if h == 0 {
 		return nil, os.NewSyscallError("CreateFileMapping", errno)
 	}
 
-	addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
+	addr, errno := syscall.MapViewOfFile(h, uint32(syscall.FILE_MAP_READ), 0,
 		0, uintptr(length))
 	if addr == 0 {
 		return nil, os.NewSyscallError("MapViewOfFile", errno)
@@ -40,7 +36,7 @@ func mmap(fd int, length int) (data []byte, err error) {
 	handleMap[addr] = h
 	handleLock.Unlock()
 
-	m := memoryMap{}
+	m := MMap{}
 	dh := m.header()
 	dh.Data = addr
 	dh.Len = length
@@ -49,24 +45,24 @@ func mmap(fd int, length int) (data []byte, err error) {
 	return m, nil
 }
 
-func (m *memoryMap) header() *reflect.SliceHeader {
+func (m *MMap) header() *reflect.SliceHeader {
 	return (*reflect.SliceHeader)(unsafe.Pointer(m))
 }
 
 func flush(addr, len uintptr) error {
-	errno := windows.FlushViewOfFile(addr, len)
+	errno := syscall.FlushViewOfFile(addr, len)
 	return os.NewSyscallError("FlushViewOfFile", errno)
 }
 
 func munmap(b []byte) (err error) {
-	m := memoryMap(b)
+	m := MMap(b)
 	dh := m.header()
 
 	addr := dh.Data
 	length := uintptr(dh.Len)
 
 	flush(addr, length)
-	err = windows.UnmapViewOfFile(addr)
+	err = syscall.UnmapViewOfFile(addr)
 	if err != nil {
 		return err
 	}
@@ -80,6 +76,6 @@ func munmap(b []byte) (err error) {
 	}
 	delete(handleMap, addr)
 
-	e := windows.CloseHandle(windows.Handle(handle))
+	e := syscall.CloseHandle(syscall.Handle(handle))
 	return os.NewSyscallError("CloseHandle", e)
 }
diff --git a/vendor/github.com/oschwald/maxminddb-golang/traverse.go b/vendor/github.com/oschwald/maxminddb-golang/traverse.go
deleted file mode 100644
index f9b443c..0000000
--- a/vendor/github.com/oschwald/maxminddb-golang/traverse.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package maxminddb
-
-import "net"
-
-// Internal structure used to keep track of nodes we still need to visit.
-type netNode struct {
-	ip      net.IP
-	bit     uint
-	pointer uint
-}
-
-// Networks represents a set of subnets that we are iterating over.
-type Networks struct {
-	reader   *Reader
-	nodes    []netNode // Nodes we still have to visit.
-	lastNode netNode
-	err      error
-}
-
-// Networks returns an iterator that can be used to traverse all networks in
-// the database.
-//
-// Please note that a MaxMind DB may map IPv4 networks into several locations
-// in an IPv6 database. This iterator will iterate over all of these
-// locations separately.
-func (r *Reader) Networks() *Networks {
-	s := 4
-	if r.Metadata.IPVersion == 6 {
-		s = 16
-	}
-	return &Networks{
-		reader: r,
-		nodes: []netNode{
-			{
-				ip: make(net.IP, s),
-			},
-		},
-	}
-}
-
-// Next prepares the next network for reading with the Network method. It
-// returns true if there is another network to be processed and false if there
-// are no more networks or if there is an error.
-func (n *Networks) Next() bool {
-	for len(n.nodes) > 0 {
-		node := n.nodes[len(n.nodes)-1]
-		n.nodes = n.nodes[:len(n.nodes)-1]
-
-		for {
-			if node.pointer < n.reader.Metadata.NodeCount {
-				ipRight := make(net.IP, len(node.ip))
-				copy(ipRight, node.ip)
-				if len(ipRight) <= int(node.bit>>3) {
-					n.err = newInvalidDatabaseError(
-						"invalid search tree at %v/%v", ipRight, node.bit)
-					return false
-				}
-				ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))
-
-				rightPointer, err := n.reader.readNode(node.pointer, 1)
-				if err != nil {
-					n.err = err
-					return false
-				}
-
-				node.bit++
-				n.nodes = append(n.nodes, netNode{
-					pointer: rightPointer,
-					ip:      ipRight,
-					bit:     node.bit,
-				})
-
-				node.pointer, err = n.reader.readNode(node.pointer, 0)
-				if err != nil {
-					n.err = err
-					return false
-				}
-
-			} else if node.pointer > n.reader.Metadata.NodeCount {
-				n.lastNode = node
-				return true
-			} else {
-				break
-			}
-		}
-	}
-
-	return false
-}
-
-// Network returns the current network or an error if there is a problem
-// decoding the data for the network. It takes a pointer to a result value to
-// decode the network's data into.
-func (n *Networks) Network(result interface{}) (*net.IPNet, error) {
-	if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
-		return nil, err
-	}
-
-	return &net.IPNet{
-		IP:   n.lastNode.ip,
-		Mask: net.CIDRMask(int(n.lastNode.bit), len(n.lastNode.ip)*8),
-	}, nil
-}
-
-// Err returns an error, if any, that was encountered during iteration.
-func (n *Networks) Err() error {
-	return n.err
-}
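
Deleting traverse.go removes the Networks iterator entirely. Code that previously walked every network in the database, roughly as in this sketch of the removed API (db is a *maxminddb.Reader), no longer compiles once this update lands:

	networks := db.Networks()
	var record interface{}
	for networks.Next() {
		subnet, err := networks.Network(&record)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(subnet, record)
	}
	if err := networks.Err(); err != nil {
		log.Fatal(err)
	}
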
diff --git a/vendor/github.com/oschwald/maxminddb-golang/verifier.go b/vendor/github.com/oschwald/maxminddb-golang/verifier.go
deleted file mode 100644
index ace9d35..0000000
--- a/vendor/github.com/oschwald/maxminddb-golang/verifier.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package maxminddb
-
-import "reflect"
-
-type verifier struct {
-	reader *Reader
-}
-
-// Verify checks that the database is valid. It validates the search tree,
-// the data section, and the metadata section. This verifier is stricter than
-// the specification and may return errors on databases that are readable.
-func (r *Reader) Verify() error {
-	v := verifier{r}
-	if err := v.verifyMetadata(); err != nil {
-		return err
-	}
-
-	return v.verifyDatabase()
-}
-
-func (v *verifier) verifyMetadata() error {
-	metadata := v.reader.Metadata
-
-	if metadata.BinaryFormatMajorVersion != 2 {
-		return testError(
-			"binary_format_major_version",
-			2,
-			metadata.BinaryFormatMajorVersion,
-		)
-	}
-
-	if metadata.BinaryFormatMinorVersion != 0 {
-		return testError(
-			"binary_format_minor_version",
-			0,
-			metadata.BinaryFormatMinorVersion,
-		)
-	}
-
-	if metadata.DatabaseType == "" {
-		return testError(
-			"database_type",
-			"non-empty string",
-			metadata.DatabaseType,
-		)
-	}
-
-	if len(metadata.Description) == 0 {
-		return testError(
-			"description",
-			"non-empty slice",
-			metadata.Description,
-		)
-	}
-
-	if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
-		return testError(
-			"ip_version",
-			"4 or 6",
-			metadata.IPVersion,
-		)
-	}
-
-	if metadata.RecordSize != 24 &&
-		metadata.RecordSize != 28 &&
-		metadata.RecordSize != 32 {
-		return testError(
-			"record_size",
-			"24, 28, or 32",
-			metadata.RecordSize,
-		)
-	}
-
-	if metadata.NodeCount == 0 {
-		return testError(
-			"node_count",
-			"positive integer",
-			metadata.NodeCount,
-		)
-	}
-	return nil
-}
-
-func (v *verifier) verifyDatabase() error {
-	offsets, err := v.verifySearchTree()
-	if err != nil {
-		return err
-	}
-
-	if err := v.verifyDataSectionSeparator(); err != nil {
-		return err
-	}
-
-	return v.verifyDataSection(offsets)
-}
-
-func (v *verifier) verifySearchTree() (map[uint]bool, error) {
-	offsets := make(map[uint]bool)
-
-	it := v.reader.Networks()
-	for it.Next() {
-		offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
-		if err != nil {
-			return nil, err
-		}
-		offsets[uint(offset)] = true
-	}
-	if err := it.Err(); err != nil {
-		return nil, err
-	}
-	return offsets, nil
-}
-
-func (v *verifier) verifyDataSectionSeparator() error {
-	separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
-
-	separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
-
-	for _, b := range separator {
-		if b != 0 {
-			return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
-		}
-	}
-	return nil
-}
-
-func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
-	pointerCount := len(offsets)
-
-	decoder := v.reader.decoder
-
-	var offset uint
-	bufferLen := uint(len(decoder.buffer))
-	for offset < bufferLen {
-		var data interface{}
-		rv := reflect.ValueOf(&data)
-		newOffset, err := decoder.decode(offset, rv, 0)
-		if err != nil {
-			return newInvalidDatabaseError("received decoding error (%v) at offset of %v", err, offset)
-		}
-		if newOffset <= offset {
-			return newInvalidDatabaseError("data section offset unexpectedly went from %v to %v", offset, newOffset)
-		}
-
-		pointer := offset
-
-		if _, ok := offsets[pointer]; ok {
-			delete(offsets, pointer)
-		} else {
-			return newInvalidDatabaseError("found data (%v) at %v that the search tree does not point to", data, pointer)
-		}
-
-		offset = newOffset
-	}
-
-	if offset != bufferLen {
-		return newInvalidDatabaseError(
-			"unexpected data at the end of the data section (last offset: %v, end: %v)",
-			offset,
-			bufferLen,
-		)
-	}
-
-	if len(offsets) != 0 {
-		return newInvalidDatabaseError(
-			"found %v pointers (of %v) in the search tree that we did not see in the data section",
-			len(offsets),
-			pointerCount,
-		)
-	}
-	return nil
-}
-
-func testError(
-	field string,
-	expected interface{},
-	actual interface{},
-) error {
-	return newInvalidDatabaseError(
-		"%v - Expected: %v Actual: %v",
-		field,
-		expected,
-		actual,
-	)
-}
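
Deleting verifier.go likewise drops Reader.Verify. A caller that sanity-checked a freshly opened database, as in this sketch of the removed API, would need to drop or replace that check after this pin:

	if err := db.Verify(); err != nil {
		log.Printf("MaxMind DB failed verification: %v", err)
	}
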
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 85cffe7..ebede5f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -72,7 +72,7 @@ github.com/openzipkin/zipkin-go/model
 github.com/openzipkin/zipkin-go/propagation
 github.com/openzipkin/zipkin-go/reporter
 github.com/openzipkin/zipkin-go/reporter/http
-# github.com/oschwald/maxminddb-golang v0.0.0-20170901134056-26fe5ace1c70
+# github.com/oschwald/maxminddb-golang v0.2.0
 ## explicit
 github.com/oschwald/maxminddb-golang
 # github.com/pkg/errors v0.9.1
-- 
GitLab