From 7c13b6dcd9bf565024a06d22c9a0b145f64ea2f8 Mon Sep 17 00:00:00 2001
From: ale <ale@incal.net>
Date: Mon, 18 Jan 2016 11:42:47 +0000
Subject: [PATCH] switch to github.com/syndtr/goleveldb

---
 Godeps/Godeps.json                            |   17 +
 Godeps/Readme                                 |    5 +
 Godeps/_workspace/.gitignore                  |    2 +
 .../src/github.com/golang/snappy/AUTHORS      |   14 +
 .../src/github.com/golang/snappy/CONTRIBUTORS |   36 +
 .../src/github.com/golang/snappy/LICENSE      |   27 +
 .../src/github.com/golang/snappy/README       |    7 +
 .../src/github.com/golang/snappy/decode.go    |  294 ++
 .../src/github.com/golang/snappy/encode.go    |  254 ++
 .../src/github.com/golang/snappy/snappy.go    |   68 +
 .../github.com/golang/snappy/snappy_test.go   |  377 +++
 .../syndtr/goleveldb/leveldb/batch.go         |  252 ++
 .../syndtr/goleveldb/leveldb/batch_test.go    |  120 +
 .../syndtr/goleveldb/leveldb/bench2_test.go   |   58 +
 .../syndtr/goleveldb/leveldb/bench_test.go    |  464 +++
 .../goleveldb/leveldb/cache/bench2_test.go    |   30 +
 .../syndtr/goleveldb/leveldb/cache/cache.go   |  676 +++++
 .../goleveldb/leveldb/cache/cache_test.go     |  554 ++++
 .../syndtr/goleveldb/leveldb/cache/lru.go     |  195 ++
 .../syndtr/goleveldb/leveldb/comparer.go      |   75 +
 .../leveldb/comparer/bytes_comparer.go        |   51 +
 .../goleveldb/leveldb/comparer/comparer.go    |   57 +
 .../syndtr/goleveldb/leveldb/corrupt_test.go  |  500 +++
 .../github.com/syndtr/goleveldb/leveldb/db.go | 1070 +++++++
 .../syndtr/goleveldb/leveldb/db_compaction.go |  791 +++++
 .../syndtr/goleveldb/leveldb/db_iter.go       |  350 +++
 .../syndtr/goleveldb/leveldb/db_snapshot.go   |  183 ++
 .../syndtr/goleveldb/leveldb/db_state.go      |  211 ++
 .../syndtr/goleveldb/leveldb/db_test.go       | 2701 +++++++++++++++++
 .../syndtr/goleveldb/leveldb/db_util.go       |  100 +
 .../syndtr/goleveldb/leveldb/db_write.go      |  338 +++
 .../syndtr/goleveldb/leveldb/doc.go           |   90 +
 .../syndtr/goleveldb/leveldb/errors.go        |   19 +
 .../syndtr/goleveldb/leveldb/errors/errors.go |   78 +
 .../syndtr/goleveldb/leveldb/external_test.go |   58 +
 .../syndtr/goleveldb/leveldb/filter.go        |   31 +
 .../syndtr/goleveldb/leveldb/filter/bloom.go  |  116 +
 .../goleveldb/leveldb/filter/bloom_test.go    |  142 +
 .../syndtr/goleveldb/leveldb/filter/filter.go |   60 +
 .../goleveldb/leveldb/iterator/array_iter.go  |  184 ++
 .../leveldb/iterator/array_iter_test.go       |   30 +
 .../leveldb/iterator/indexed_iter.go          |  242 ++
 .../leveldb/iterator/indexed_iter_test.go     |   83 +
 .../syndtr/goleveldb/leveldb/iterator/iter.go |  131 +
 .../leveldb/iterator/iter_suite_test.go       |   11 +
 .../goleveldb/leveldb/iterator/merged_iter.go |  304 ++
 .../leveldb/iterator/merged_iter_test.go      |   60 +
 .../goleveldb/leveldb/journal/journal.go      |  520 ++++
 .../goleveldb/leveldb/journal/journal_test.go |  818 +++++
 .../syndtr/goleveldb/leveldb/key.go           |  142 +
 .../syndtr/goleveldb/leveldb/key_test.go      |  133 +
 .../goleveldb/leveldb/leveldb_suite_test.go   |   11 +
 .../goleveldb/leveldb/memdb/bench_test.go     |   75 +
 .../syndtr/goleveldb/leveldb/memdb/memdb.go   |  471 +++
 .../leveldb/memdb/memdb_suite_test.go         |   11 +
 .../goleveldb/leveldb/memdb/memdb_test.go     |  135 +
 .../syndtr/goleveldb/leveldb/opt/options.go   |  682 +++++
 .../syndtr/goleveldb/leveldb/options.go       |   92 +
 .../syndtr/goleveldb/leveldb/session.go       |  211 ++
 .../goleveldb/leveldb/session_compaction.go   |  287 ++
 .../goleveldb/leveldb/session_record.go       |  311 ++
 .../goleveldb/leveldb/session_record_test.go  |   64 +
 .../syndtr/goleveldb/leveldb/session_util.go  |  249 ++
 .../goleveldb/leveldb/storage/file_storage.go |  565 ++++
 .../leveldb/storage/file_storage_plan9.go     |   52 +
 .../leveldb/storage/file_storage_solaris.go   |   68 +
 .../leveldb/storage/file_storage_test.go      |  142 +
 .../leveldb/storage/file_storage_unix.go      |   73 +
 .../leveldb/storage/file_storage_windows.go   |   69 +
 .../goleveldb/leveldb/storage/mem_storage.go  |  203 ++
 .../leveldb/storage/mem_storage_test.go       |   66 +
 .../goleveldb/leveldb/storage/storage.go      |  173 ++
 .../syndtr/goleveldb/leveldb/storage_test.go  |  549 ++++
 .../syndtr/goleveldb/leveldb/table.go         |  529 ++++
 .../goleveldb/leveldb/table/block_test.go     |  139 +
 .../syndtr/goleveldb/leveldb/table/reader.go  | 1106 +++++++
 .../syndtr/goleveldb/leveldb/table/table.go   |  177 ++
 .../leveldb/table/table_suite_test.go         |   11 +
 .../goleveldb/leveldb/table/table_test.go     |  122 +
 .../syndtr/goleveldb/leveldb/table/writer.go  |  374 +++
 .../syndtr/goleveldb/leveldb/testutil/db.go   |  222 ++
 .../goleveldb/leveldb/testutil/ginkgo.go      |   21 +
 .../syndtr/goleveldb/leveldb/testutil/iter.go |  327 ++
 .../syndtr/goleveldb/leveldb/testutil/kv.go   |  352 +++
 .../goleveldb/leveldb/testutil/kvtest.go      |  187 ++
 .../goleveldb/leveldb/testutil/storage.go     |  586 ++++
 .../syndtr/goleveldb/leveldb/testutil/util.go |  171 ++
 .../syndtr/goleveldb/leveldb/testutil_test.go |   63 +
 .../syndtr/goleveldb/leveldb/util.go          |   91 +
 .../syndtr/goleveldb/leveldb/util/buffer.go   |  293 ++
 .../goleveldb/leveldb/util/buffer_pool.go     |  239 ++
 .../goleveldb/leveldb/util/buffer_test.go     |  369 +++
 .../syndtr/goleveldb/leveldb/util/crc32.go    |   30 +
 .../syndtr/goleveldb/leveldb/util/hash.go     |   48 +
 .../syndtr/goleveldb/leveldb/util/pool.go     |   21 +
 .../goleveldb/leveldb/util/pool_legacy.go     |   33 +
 .../syndtr/goleveldb/leveldb/util/range.go    |   32 +
 .../syndtr/goleveldb/leveldb/util/util.go     |   73 +
 .../syndtr/goleveldb/leveldb/version.go       |  457 +++
 README.rst => README.md                       |   11 +-
 debian/control                                |    4 +-
 debian/rules                                  |    4 +-
 server/db.go                                  |   98 +-
 103 files changed, 23808 insertions(+), 70 deletions(-)
 create mode 100644 Godeps/Godeps.json
 create mode 100644 Godeps/Readme
 create mode 100644 Godeps/_workspace/.gitignore
 create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/AUTHORS
 create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS
 create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/LICENSE
 create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/README
 create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/decode.go
 create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/encode.go
 create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/snappy.go
 create mode 100644 Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
 create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
 rename README.rst => README.md (94%)

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
new file mode 100644
index 0000000..f50a970
--- /dev/null
+++ b/Godeps/Godeps.json
@@ -0,0 +1,17 @@
+{
+	"ImportPath": "git.autistici.org/ai/audit",
+	"GoVersion": "go1.5.1",
+	"Packages": [
+		"./..."
+	],
+	"Deps": [
+		{
+			"ImportPath": "github.com/golang/snappy",
+			"Rev": "723cc1e459b8eea2dea4583200fd60757d40097a"
+		},
+		{
+			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
+			"Rev": "5acacf6e72d3aeaf26dd3d3f163c635d3ef1e6e6"
+		}
+	]
+}
diff --git a/Godeps/Readme b/Godeps/Readme
new file mode 100644
index 0000000..4cdaa53
--- /dev/null
+++ b/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
new file mode 100644
index 0000000..f037d68
--- /dev/null
+++ b/Godeps/_workspace/.gitignore
@@ -0,0 +1,2 @@
+/pkg
+/bin
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS b/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..824bf2e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,14 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS b/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..9f54f21
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,36 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+#     http://code.google.com/legal/individual-cla-v1.0.html
+#     http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+#     Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman <kaib@golang.org>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/LICENSE b/Godeps/_workspace/src/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/README b/Godeps/_workspace/src/github.com/golang/snappy/README
new file mode 100644
index 0000000..5074bba
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/README
@@ -0,0 +1,7 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/decode.go b/Godeps/_workspace/src/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..e7f1259
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/decode.go
@@ -0,0 +1,294 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if len(dst) < dLen {
+		dst = make([]byte, dLen)
+	}
+
+	var d, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-1])
+			case x == 61:
+				s += 3
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-2]) | uint(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
+			}
+			length = int(x + 1)
+			if length <= 0 {
+				return nil, errors.New("snappy: unsupported literal length")
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return nil, ErrCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if s > len(src) {
+				return nil, ErrCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
+
+		case tagCopy2:
+			s += 3
+			if s > len(src) {
+				return nil, ErrCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(src[s-2]) | int(src[s-1])<<8
+
+		case tagCopy4:
+			return nil, errors.New("snappy: unsupported COPY_4 tag")
+		}
+
+		end := d + length
+		if offset > d || end > len(dst) {
+			return nil, ErrCorrupt
+		}
+		for ; d < end; d++ {
+			dst[d] = dst[d-offset]
+		}
+	}
+	if d != dLen {
+		return nil, ErrCorrupt
+	}
+	return dst[:d], nil
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxUncompressedChunkLen),
+		buf:     make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
+	}
+}
+
+// Reader is an io.Reader than can read Snappy-compressed bytes.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4]) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if !r.readFull(r.decoded[:n]) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)]) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen]) {
+			return 0, r.err
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/encode.go b/Godeps/_workspace/src/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..f3b5484
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/encode.go
@@ -0,0 +1,254 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+// We limit how far copy back-references can go, the same as the C++ code.
+const maxOffset = 1 << 15
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1) // i: header length; n: literal length minus one, as the format stores it.
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral // short form: length fits in the tag byte's upper 6 bits.
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral // one extra little-endian length byte follows the tag.
+		dst[1] = uint8(n)
+		i = 2
+	case n < 1<<16:
+		dst[0] = 61<<2 | tagLiteral // two extra length bytes.
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	case n < 1<<24:
+		dst[0] = 62<<2 | tagLiteral // three extra length bytes.
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		dst[3] = uint8(n >> 16)
+		i = 4
+	case int64(n) < 1<<32:
+		dst[0] = 63<<2 | tagLiteral // four extra length bytes.
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		dst[3] = uint8(n >> 16)
+		dst[4] = uint8(n >> 24)
+		i = 5
+	default:
+		panic("snappy: source buffer is too long")
+	}
+	if copy(dst[i:], lit) != len(lit) {
+		panic("snappy: destination buffer is too short")
+	}
+	return i + len(lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	for length > 0 { // long copies are split across multiple chunks.
+		x := length - 4
+		if 0 <= x && x < 1<<3 && offset < 1<<11 { // 2-byte form: length in [4, 12) and offset < 2048.
+			dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
+			dst[i+1] = uint8(offset)
+			i += 2
+			break
+		}
+
+		x = length
+		if x > 1<<6 {
+			x = 1 << 6 // 3-byte form encodes at most 64 bytes per chunk.
+		}
+		dst[i+0] = uint8(x-1)<<2 | tagCopy2 // 3-byte form: 6-bit (length-1), 16-bit little-endian offset.
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= x
+	}
+	return i
+}
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+	if n := MaxEncodedLen(len(src)); len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	// Return early if src is short.
+	if len(src) <= 4 {
+		if len(src) != 0 {
+			d += emitLiteral(dst[d:], src)
+		}
+		return dst[:d]
+	}
+
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	const maxTableSize = 1 << 14
+	shift, tableSize := uint(32-8), 1<<8 // shift maps a 32-bit hash down onto [0, tableSize).
+	for tableSize < maxTableSize && tableSize < len(src) {
+		shift--
+		tableSize *= 2
+	}
+	var table [maxTableSize]int
+
+	// Iterate over the source bytes.
+	var (
+		s   int // The iterator position.
+		t   int // The last position with the same hash as s.
+		lit int // The start position of any pending literal bytes.
+	)
+	for s+3 < len(src) { // a match needs at least 4 bytes, so stop when fewer remain.
+		// Update the hash table.
+		b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
+		h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
+		p := &table[(h*0x1e35a7bd)>>shift] // multiplicative hash of the 4 bytes at s.
+		// We need to store values in [-1, inf) in table. To save
+		// some initialization time, (re)use the table's zero value
+		// and shift the values against this zero: add 1 on writes,
+		// subtract 1 on reads.
+		t, *p = *p-1, s+1
+		// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
+		if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
+			s++
+			continue
+		}
+		// Otherwise, we have a match. First, emit any pending literal bytes.
+		if lit != s {
+			d += emitLiteral(dst[d:], src[lit:s])
+		}
+		// Extend the match to be as long as possible.
+		s0 := s // s0 marks where the match began.
+		s, t = s+4, t+4
+		for s < len(src) && src[s] == src[t] {
+			s++
+			t++
+		}
+		// Emit the copied bytes.
+		d += emitCopy(dst[d:], s-t, s-s0)
+		lit = s
+	}
+
+	// Emit any final pending literal bytes and return.
+	if lit != len(src) {
+		d += emitLiteral(dst[d:], src[lit:])
+	}
+	return dst[:d]
+}
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+func MaxEncodedLen(srcLen int) int {
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
+	// enough, it will take 5 bytes to encode the copy op. Therefore the
+	// worst case here is a one-byte literal followed by a five-byte copy.
+	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+	//
+	// This last factor dominates the blowup, so the final estimate is:
+	return 32 + srcLen + srcLen/6
+}
+
+// NewWriter returns a new Writer that compresses to w, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:   w,
+		enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+	w           io.Writer // underlying destination stream.
+	err         error     // sticky error; once set, subsequent Writes fail with it.
+	enc         []byte    // scratch buffer for compressed chunk bodies (sized in NewWriter).
+	buf         [checksumSize + chunkHeaderSize]byte // scratch for the per-chunk header + checksum.
+	wroteHeader bool      // whether the stream identifier chunk has been written.
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	w.wroteHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (n int, errRet error) {
+	if w.err != nil { // errors are sticky: fail fast on a previously failed stream.
+		return 0, w.err
+	}
+	if !w.wroteHeader { // lazily emit the stream identifier chunk before the first data chunk.
+		copy(w.enc, magicChunk) // w.enc is reused as scratch here.
+		if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
+			w.err = err
+			return n, err
+		}
+		w.wroteHeader = true
+	}
+	for len(p) > 0 {
+		var uncompressed []byte
+		if len(p) > maxUncompressedChunkLen { // the framing format caps each chunk at 64 KiB of input.
+			uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed) // checksum is always computed over the uncompressed data.
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkBody := Encode(w.enc, uncompressed)
+		if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
+		}
+
+		chunkLen := 4 + len(chunkBody) // +4 for the checksum that precedes the body.
+		w.buf[0] = chunkType
+		w.buf[1] = uint8(chunkLen >> 0) // 24-bit little-endian chunk length.
+		w.buf[2] = uint8(chunkLen >> 8)
+		w.buf[3] = uint8(chunkLen >> 16)
+		w.buf[4] = uint8(checksum >> 0) // 32-bit little-endian checksum.
+		w.buf[5] = uint8(checksum >> 8)
+		w.buf[6] = uint8(checksum >> 16)
+		w.buf[7] = uint8(checksum >> 24)
+		if _, err := w.w.Write(w.buf[:]); err != nil {
+			w.err = err
+			return n, err // n counts only the uncompressed bytes of fully written chunks.
+		}
+		if _, err := w.w.Write(chunkBody); err != nil {
+			w.err = err
+			return n, err
+		}
+		n += len(uncompressed)
+	}
+	return n, nil
+}
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..e98653a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/snappy.go
@@ -0,0 +1,68 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer supported.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
+	maxUncompressedChunkLen = 65536
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8 // "masked" CRC-32C: rotate right by 15 bits, then add a constant.
+}
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go
new file mode 100644
index 0000000..f8188f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go
@@ -0,0 +1,377 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+)
+
+var (
+	download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
+	testdata = flag.String("testdata", "testdata", "Directory containing the test data")
+)
+
+func roundtrip(b, ebuf, dbuf []byte) error {
+	d, err := Decode(dbuf, Encode(ebuf, b))
+	if err != nil {
+		return fmt.Errorf("decoding error: %v", err)
+	}
+	if !bytes.Equal(b, d) {
+		return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot  %v", b, d)
+	}
+	return nil
+}
+
+func TestEmpty(t *testing.T) {
+	if err := roundtrip(nil, nil, nil); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestSmallCopy(t *testing.T) {
+	for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
+		for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
+			for i := 0; i < 32; i++ {
+				s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
+				if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
+					t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
+				}
+			}
+		}
+	}
+}
+
+func TestSmallRand(t *testing.T) {
+	rng := rand.New(rand.NewSource(27354294))
+	for n := 1; n < 20000; n += 23 {
+		b := make([]byte, n)
+		for i := range b {
+			b[i] = uint8(rng.Uint32())
+		}
+		if err := roundtrip(b, nil, nil); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestSmallRegular(t *testing.T) {
+	for n := 1; n < 20000; n += 23 {
+		b := make([]byte, n)
+		for i := range b {
+			b[i] = uint8(i%10 + 'a')
+		}
+		if err := roundtrip(b, nil, nil); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestInvalidVarint(t *testing.T) {
+	data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00")
+	if _, err := DecodedLen(data); err != ErrCorrupt {
+		t.Errorf("DecodedLen: got %v, want ErrCorrupt", err)
+	}
+	if _, err := Decode(nil, data); err != ErrCorrupt {
+		t.Errorf("Decode: got %v, want ErrCorrupt", err)
+	}
+
+	// The encoded varint overflows 32 bits
+	data = []byte("\xff\xff\xff\xff\xff\x00")
+
+	if _, err := DecodedLen(data); err != ErrCorrupt {
+		t.Errorf("DecodedLen: got %v, want ErrCorrupt", err)
+	}
+	if _, err := Decode(nil, data); err != ErrCorrupt {
+		t.Errorf("Decode: got %v, want ErrCorrupt", err)
+	}
+}
+
+func cmp(a, b []byte) error {
+	if len(a) != len(b) {
+		return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
+	}
+	for i := range a {
+		if a[i] != b[i] {
+			return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
+		}
+	}
+	return nil
+}
+
+func TestFramingFormat(t *testing.T) {
+	// src is comprised of alternating 1e5-sized sequences of random
+	// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
+	// because it is larger than maxUncompressedChunkLen (64k).
+	src := make([]byte, 1e6)
+	rng := rand.New(rand.NewSource(1))
+	for i := 0; i < 10; i++ {
+		if i%2 == 0 {
+			for j := 0; j < 1e5; j++ {
+				src[1e5*i+j] = uint8(rng.Intn(256))
+			}
+		} else {
+			for j := 0; j < 1e5; j++ {
+				src[1e5*i+j] = uint8(i)
+			}
+		}
+	}
+
+	buf := new(bytes.Buffer)
+	if _, err := NewWriter(buf).Write(src); err != nil {
+		t.Fatalf("Write: encoding: %v", err)
+	}
+	dst, err := ioutil.ReadAll(NewReader(buf))
+	if err != nil {
+		t.Fatalf("ReadAll: decoding: %v", err)
+	}
+	if err := cmp(dst, src); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReaderReset(t *testing.T) {
+	gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
+	buf := new(bytes.Buffer)
+	if _, err := NewWriter(buf).Write(gold); err != nil {
+		t.Fatalf("Write: %v", err)
+	}
+	encoded, invalid, partial := buf.String(), "invalid", "partial"
+	r := NewReader(nil)
+	for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
+		if s == partial {
+			r.Reset(strings.NewReader(encoded))
+			if _, err := r.Read(make([]byte, 101)); err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+			continue
+		}
+		r.Reset(strings.NewReader(s))
+		got, err := ioutil.ReadAll(r)
+		switch s {
+		case encoded:
+			if err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+			if err := cmp(got, gold); err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+		case invalid:
+			if err == nil {
+				t.Errorf("#%d: got nil error, want non-nil", i)
+				continue
+			}
+		}
+	}
+}
+
+func TestWriterReset(t *testing.T) {
+	gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
+	var gots, wants [][]byte
+	const n = 20
+	w, failed := NewWriter(nil), false
+	for i := 0; i <= n; i++ {
+		buf := new(bytes.Buffer)
+		w.Reset(buf)
+		want := gold[:len(gold)*i/n]
+		if _, err := w.Write(want); err != nil {
+			t.Errorf("#%d: Write: %v", i, err)
+			failed = true
+			continue
+		}
+		got, err := ioutil.ReadAll(NewReader(buf))
+		if err != nil {
+			t.Errorf("#%d: ReadAll: %v", i, err)
+			failed = true
+			continue
+		}
+		gots = append(gots, got)
+		wants = append(wants, want)
+	}
+	if failed {
+		return
+	}
+	for i := range gots {
+		if err := cmp(gots[i], wants[i]); err != nil {
+			t.Errorf("#%d: %v", i, err)
+		}
+	}
+}
+
+func benchDecode(b *testing.B, src []byte) {
+	encoded := Encode(nil, src)
+	// Bandwidth is in amount of uncompressed data.
+	b.SetBytes(int64(len(src)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Decode(src, encoded)
+	}
+}
+
+func benchEncode(b *testing.B, src []byte) {
+	// Bandwidth is in amount of uncompressed data.
+	b.SetBytes(int64(len(src)))
+	dst := make([]byte, MaxEncodedLen(len(src)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Encode(dst, src)
+	}
+}
+
+func readFile(b testing.TB, filename string) []byte {
+	src, err := ioutil.ReadFile(filename)
+	if err != nil {
+		b.Skipf("skipping benchmark: %v", err)
+	}
+	if len(src) == 0 {
+		b.Fatalf("%s has zero length", filename)
+	}
+	return src
+}
+
+// expand returns a slice of length n containing repeated copies of src.
+func expand(src []byte, n int) []byte {
+	dst := make([]byte, n)
+	for x := dst; len(x) > 0; {
+		i := copy(x, src)
+		x = x[i:]
+	}
+	return dst
+}
+
+func benchWords(b *testing.B, n int, decode bool) {
+	// Note: the file is OS-language dependent so the resulting values are not
+	// directly comparable for non-US-English OS installations.
+	data := expand(readFile(b, "/usr/share/dict/words"), n)
+	if decode {
+		benchDecode(b, data)
+	} else {
+		benchEncode(b, data)
+	}
+}
+
+func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
+func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
+func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
+func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
+func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
+func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
+func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
+func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
+
+// testFiles' values are copied directly from
+// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
+// The label field is unused in snappy-go.
+var testFiles = []struct {
+	label    string
+	filename string
+}{
+	{"html", "html"},
+	{"urls", "urls.10K"},
+	{"jpg", "fireworks.jpeg"},
+	{"jpg_200", "fireworks.jpeg"},
+	{"pdf", "paper-100k.pdf"},
+	{"html4", "html_x_4"},
+	{"txt1", "alice29.txt"},
+	{"txt2", "asyoulik.txt"},
+	{"txt3", "lcet10.txt"},
+	{"txt4", "plrabn12.txt"},
+	{"pb", "geo.protodata"},
+	{"gaviota", "kppkn.gtb"},
+}
+
+// The test data files are present at this canonical URL.
+const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
+
+func downloadTestdata(b *testing.B, basename string) (errRet error) {
+	filename := filepath.Join(*testdata, basename)
+	if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
+		return nil
+	}
+
+	if !*download {
+		b.Skipf("test data not found; skipping benchmark without the -download flag")
+	}
+	// Download the official snappy C++ implementation reference test data
+	// files for benchmarking.
+	if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
+		return fmt.Errorf("failed to create testdata: %s", err)
+	}
+
+	f, err := os.Create(filename)
+	if err != nil {
+		return fmt.Errorf("failed to create %s: %s", filename, err)
+	}
+	defer f.Close()
+	defer func() {
+		if errRet != nil {
+			os.Remove(filename)
+		}
+	}()
+	url := baseURL + basename
+	resp, err := http.Get(url)
+	if err != nil {
+		return fmt.Errorf("failed to download %s: %s", url, err)
+	}
+	defer resp.Body.Close()
+	if s := resp.StatusCode; s != http.StatusOK {
+		return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
+	}
+	_, err = io.Copy(f, resp.Body)
+	if err != nil {
+		return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
+	}
+	return nil
+}
+
+func benchFile(b *testing.B, n int, decode bool) {
+	if err := downloadTestdata(b, testFiles[n].filename); err != nil {
+		b.Fatalf("failed to download testdata: %s", err)
+	}
+	data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
+	if decode {
+		benchDecode(b, data)
+	} else {
+		benchEncode(b, data)
+	}
+}
+
+// Naming convention is kept similar to what snappy's C++ implementation uses.
+func Benchmark_UFlat0(b *testing.B)  { benchFile(b, 0, true) }
+func Benchmark_UFlat1(b *testing.B)  { benchFile(b, 1, true) }
+func Benchmark_UFlat2(b *testing.B)  { benchFile(b, 2, true) }
+func Benchmark_UFlat3(b *testing.B)  { benchFile(b, 3, true) }
+func Benchmark_UFlat4(b *testing.B)  { benchFile(b, 4, true) }
+func Benchmark_UFlat5(b *testing.B)  { benchFile(b, 5, true) }
+func Benchmark_UFlat6(b *testing.B)  { benchFile(b, 6, true) }
+func Benchmark_UFlat7(b *testing.B)  { benchFile(b, 7, true) }
+func Benchmark_UFlat8(b *testing.B)  { benchFile(b, 8, true) }
+func Benchmark_UFlat9(b *testing.B)  { benchFile(b, 9, true) }
+func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
+func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
+func Benchmark_ZFlat0(b *testing.B)  { benchFile(b, 0, false) }
+func Benchmark_ZFlat1(b *testing.B)  { benchFile(b, 1, false) }
+func Benchmark_ZFlat2(b *testing.B)  { benchFile(b, 2, false) }
+func Benchmark_ZFlat3(b *testing.B)  { benchFile(b, 3, false) }
+func Benchmark_ZFlat4(b *testing.B)  { benchFile(b, 4, false) }
+func Benchmark_ZFlat5(b *testing.B)  { benchFile(b, 5, false) }
+func Benchmark_ZFlat6(b *testing.B)  { benchFile(b, 6, false) }
+func Benchmark_ZFlat7(b *testing.B)  { benchFile(b, 7, false) }
+func Benchmark_ZFlat8(b *testing.B)  { benchFile(b, 8, false) }
+func Benchmark_ZFlat9(b *testing.B)  { benchFile(b, 9, false) }
+func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
+func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
new file mode 100644
index 0000000..87171c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
@@ -0,0 +1,252 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
+)
+
+type ErrBatchCorrupted struct {
+	Reason string
+}
+
+func (e *ErrBatchCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
+}
+
+func newErrBatchCorrupted(reason string) error {
+	return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
+}
+
+const (
+	batchHdrLen  = 8 + 4
+	batchGrowRec = 3000
+)
+
+type BatchReplay interface {
+	Put(key, value []byte)
+	Delete(key []byte)
+}
+
+// Batch is a write batch.
+type Batch struct {
+	data       []byte
+	rLen, bLen int
+	seq        uint64
+	sync       bool
+}
+
+func (b *Batch) grow(n int) { // grow ensures capacity for n more bytes past the current end, reserving the 12-byte header.
+	off := len(b.data)
+	if off == 0 { // empty batch: records start after the batchHdrLen header.
+		off = batchHdrLen
+		if b.data != nil {
+			b.data = b.data[:off]
+		}
+	}
+	if cap(b.data)-off < n {
+		if b.data == nil {
+			b.data = make([]byte, off, off+n)
+		} else {
+			odata := b.data
+			div := 1
+			if b.rLen > batchGrowRec { // dampen over-allocation once the batch holds many records.
+				div = b.rLen / batchGrowRec
+			}
+			b.data = make([]byte, off, off+n+(off-batchHdrLen)/div) // grow by n plus a fraction of the existing payload.
+			copy(b.data, odata)
+		}
+	}
+}
+
+func (b *Batch) appendRec(kt kType, key, value []byte) { // appendRec encodes one record: type byte, varint key len, key[, varint value len, value].
+	n := 1 + binary.MaxVarintLen32 + len(key) // worst-case encoded size.
+	if kt == ktVal {
+		n += binary.MaxVarintLen32 + len(value)
+	}
+	b.grow(n)
+	off := len(b.data)
+	data := b.data[:off+n]
+	data[off] = byte(kt)
+	off += 1
+	off += binary.PutUvarint(data[off:], uint64(len(key)))
+	copy(data[off:], key)
+	off += len(key)
+	if kt == ktVal {
+		off += binary.PutUvarint(data[off:], uint64(len(value)))
+		copy(data[off:], value)
+		off += len(value)
+	}
+	b.data = data[:off] // trim: varints may use fewer bytes than the worst case reserved above.
+	b.rLen++
+	//  Include 8-byte ikey header
+	b.bLen += len(key) + len(value) + 8
+}
+
+// Put appends 'put operation' of the given key/value pair to the batch.
+// It is safe to modify the contents of the argument after Put returns.
+func (b *Batch) Put(key, value []byte) {
+	b.appendRec(ktVal, key, value)
+}
+
+// Delete appends 'delete operation' of the given key to the batch.
+// It is safe to modify the contents of the argument after Delete returns.
+func (b *Batch) Delete(key []byte) {
+	b.appendRec(ktDel, key, nil)
+}
+
+// Dump dumps batch contents. The returned slice can be loaded into the
+// batch using Load method.
+// The returned slice is not its own copy, so the contents should not be
+// modified.
+func (b *Batch) Dump() []byte {
+	return b.encode()
+}
+
+// Load loads given slice into the batch. Previous contents of the batch
+// will be discarded.
+// The given slice will not be copied and will be used as batch buffer, so
+// it is not safe to modify the contents of the slice.
+func (b *Batch) Load(data []byte) error {
+	return b.decode(0, data)
+}
+
+// Replay replays batch contents.
+func (b *Batch) Replay(r BatchReplay) error {
+	return b.decodeRec(func(i int, kt kType, key, value []byte) {
+		switch kt {
+		case ktVal:
+			r.Put(key, value)
+		case ktDel:
+			r.Delete(key)
+		}
+	})
+}
+
+// Len returns number of records in the batch.
+func (b *Batch) Len() int {
+	return b.rLen
+}
+
+// Reset resets the batch.
+func (b *Batch) Reset() {
+	b.data = b.data[:0]
+	b.seq = 0
+	b.rLen = 0
+	b.bLen = 0
+	b.sync = false
+}
+
+func (b *Batch) init(sync bool) {
+	b.sync = sync
+}
+
+func (b *Batch) append(p *Batch) {
+	if p.rLen > 0 {
+		b.grow(len(p.data) - batchHdrLen)
+		b.data = append(b.data, p.data[batchHdrLen:]...)
+		b.rLen += p.rLen
+	}
+	if p.sync {
+		b.sync = true
+	}
+}
+
+// size returns sums of key/value pair length plus 8-bytes ikey.
+func (b *Batch) size() int {
+	return b.bLen
+}
+
+func (b *Batch) encode() []byte {
+	b.grow(0)
+	binary.LittleEndian.PutUint64(b.data, b.seq)
+	binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))
+
+	return b.data
+}
+
+func (b *Batch) decode(prevSeq uint64, data []byte) error { // decode parses the header (8-byte seq, 4-byte record count) and adopts data as the batch buffer without copying.
+	if len(data) < batchHdrLen {
+		return newErrBatchCorrupted("too short")
+	}
+
+	b.seq = binary.LittleEndian.Uint64(data)
+	if b.seq < prevSeq { // sequence numbers must not go backwards.
+		return newErrBatchCorrupted("invalid sequence number")
+	}
+	b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
+	if b.rLen < 0 { // NOTE: only possible on platforms where int is 32 bits wide.
+		return newErrBatchCorrupted("invalid records length")
+	}
+	// No need to be precise at this point, it won't be used anyway
+	b.bLen = len(data) - batchHdrLen
+	b.data = data
+
+	return nil
+}
+
+func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) { // decodeRec walks the records in b.data, invoking f once per record.
+	off := batchHdrLen
+	for i := 0; i < b.rLen; i++ {
+		if off >= len(b.data) { // fewer records present than the header claimed.
+			return newErrBatchCorrupted("invalid records length")
+		}
+
+		kt := kType(b.data[off])
+		if kt > ktVal { // any type beyond ktVal is invalid.
+			return newErrBatchCorrupted("bad record: invalid type")
+		}
+		off += 1
+
+		x, n := binary.Uvarint(b.data[off:])
+		off += n
+		if n <= 0 || off+int(x) > len(b.data) { // n <= 0 means a malformed varint.
+			return newErrBatchCorrupted("bad record: invalid key length")
+		}
+		key := b.data[off : off+int(x)] // key aliases b.data; no copy is made.
+		off += int(x)
+		var value []byte
+		if kt == ktVal {
+			x, n := binary.Uvarint(b.data[off:])
+			off += n
+			if n <= 0 || off+int(x) > len(b.data) {
+				return newErrBatchCorrupted("bad record: invalid value length")
+			}
+			value = b.data[off : off+int(x)] // value aliases b.data as well.
+			off += int(x)
+		}
+
+		f(i, kt, key, value)
+	}
+
+	return nil
+}
+
+func (b *Batch) memReplay(to *memdb.DB) error {
+	return b.decodeRec(func(i int, kt kType, key, value []byte) {
+		ikey := newIkey(key, b.seq+uint64(i), kt)
+		to.Put(ikey, value)
+	})
+}
+
+func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
+	if err := b.decode(prevSeq, data); err != nil {
+		return err
+	}
+	return b.memReplay(to)
+}
+
+func (b *Batch) revertMemReplay(to *memdb.DB) error {
+	return b.decodeRec(func(i int, kt kType, key, value []byte) {
+		ikey := newIkey(key, b.seq+uint64(i), kt)
+		to.Delete(ikey)
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
new file mode 100644
index 0000000..94cf173
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
@@ -0,0 +1,120 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bytes"
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
+)
+
+type tbRec struct {
+	kt         kType
+	key, value []byte
+}
+
+type testBatch struct {
+	rec []*tbRec
+}
+
+func (p *testBatch) Put(key, value []byte) {
+	p.rec = append(p.rec, &tbRec{ktVal, key, value})
+}
+
+func (p *testBatch) Delete(key []byte) {
+	p.rec = append(p.rec, &tbRec{ktDel, key, nil})
+}
+
+func compareBatch(t *testing.T, b1, b2 *Batch) {
+	if b1.seq != b2.seq {
+		t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
+	}
+	if b1.Len() != b2.Len() {
+		t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len())
+	}
+	p1, p2 := new(testBatch), new(testBatch)
+	err := b1.Replay(p1)
+	if err != nil {
+		t.Fatal("error when replaying batch 1: ", err)
+	}
+	err = b2.Replay(p2)
+	if err != nil {
+		t.Fatal("error when replaying batch 2: ", err)
+	}
+	for i := range p1.rec {
+		r1, r2 := p1.rec[i], p2.rec[i]
+		if r1.kt != r2.kt {
+			t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt)
+		}
+		if !bytes.Equal(r1.key, r2.key) {
+			t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
+		}
+		if r1.kt == ktVal {
+			if !bytes.Equal(r1.value, r2.value) {
+				t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
+			}
+		}
+	}
+}
+
+func TestBatch_EncodeDecode(t *testing.T) {
+	b1 := new(Batch)
+	b1.seq = 10009
+	b1.Put([]byte("key1"), []byte("value1"))
+	b1.Put([]byte("key2"), []byte("value2"))
+	b1.Delete([]byte("key1"))
+	b1.Put([]byte("k"), []byte(""))
+	b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz"))
+	b1.Delete([]byte("key10000"))
+	b1.Delete([]byte("k"))
+	buf := b1.encode()
+	b2 := new(Batch)
+	err := b2.decode(0, buf)
+	if err != nil {
+		t.Error("error when decoding batch: ", err)
+	}
+	compareBatch(t, b1, b2)
+}
+
+func TestBatch_Append(t *testing.T) {
+	b1 := new(Batch)
+	b1.seq = 10009
+	b1.Put([]byte("key1"), []byte("value1"))
+	b1.Put([]byte("key2"), []byte("value2"))
+	b1.Delete([]byte("key1"))
+	b1.Put([]byte("foo"), []byte("foovalue"))
+	b1.Put([]byte("bar"), []byte("barvalue"))
+	b2a := new(Batch)
+	b2a.seq = 10009
+	b2a.Put([]byte("key1"), []byte("value1"))
+	b2a.Put([]byte("key2"), []byte("value2"))
+	b2a.Delete([]byte("key1"))
+	b2b := new(Batch)
+	b2b.Put([]byte("foo"), []byte("foovalue"))
+	b2b.Put([]byte("bar"), []byte("barvalue"))
+	b2a.append(b2b)
+	compareBatch(t, b1, b2a)
+}
+
+func TestBatch_Size(t *testing.T) {
+	b := new(Batch)
+	for i := 0; i < 2; i++ {
+		b.Put([]byte("key1"), []byte("value1"))
+		b.Put([]byte("key2"), []byte("value2"))
+		b.Delete([]byte("key1"))
+		b.Put([]byte("foo"), []byte("foovalue"))
+		b.Put([]byte("bar"), []byte("barvalue"))
+		mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0)
+		b.memReplay(mem)
+		if b.size() != mem.Size() {
+			t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size())
+		}
+		b.Reset()
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go
new file mode 100644
index 0000000..0dd60fd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build go1.3
+
+package leveldb
+
+import (
+	"sync/atomic"
+	"testing"
+)
+
+func BenchmarkDBReadConcurrent(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.gc()
+	defer p.close()
+
+	b.ResetTimer()
+	b.SetBytes(116)
+
+	b.RunParallel(func(pb *testing.PB) {
+		iter := p.newIter()
+		defer iter.Release()
+		for pb.Next() && iter.Next() {
+		}
+	})
+}
+
+func BenchmarkDBReadConcurrent2(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.gc()
+	defer p.close()
+
+	b.ResetTimer()
+	b.SetBytes(116)
+
+	var dir uint32
+	b.RunParallel(func(pb *testing.PB) {
+		iter := p.newIter()
+		defer iter.Release()
+		if atomic.AddUint32(&dir, 1)%2 == 0 {
+			for pb.Next() && iter.Next() {
+			}
+		} else {
+			if pb.Next() && iter.Last() {
+				for pb.Next() && iter.Prev() {
+				}
+			}
+		}
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
new file mode 100644
index 0000000..0893fec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
@@ -0,0 +1,464 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bytes"
+	"fmt"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+func randomString(r *rand.Rand, n int) []byte {
+	b := new(bytes.Buffer)
+	for i := 0; i < n; i++ {
+		b.WriteByte(' ' + byte(r.Intn(95)))
+	}
+	return b.Bytes()
+}
+
+func compressibleStr(r *rand.Rand, frac float32, n int) []byte {
+	nn := int(float32(n) * frac)
+	rb := randomString(r, nn)
+	b := make([]byte, 0, n+nn)
+	for len(b) < n {
+		b = append(b, rb...)
+	}
+	return b[:n]
+}
+
+type valueGen struct {
+	src []byte
+	pos int
+}
+
+func newValueGen(frac float32) *valueGen {
+	v := new(valueGen)
+	r := rand.New(rand.NewSource(301))
+	v.src = make([]byte, 0, 1048576+100)
+	for len(v.src) < 1048576 {
+		v.src = append(v.src, compressibleStr(r, frac, 100)...)
+	}
+	return v
+}
+
+func (v *valueGen) get(n int) []byte {
+	if v.pos+n > len(v.src) {
+		v.pos = 0
+	}
+	v.pos += n
+	return v.src[v.pos-n : v.pos]
+}
+
+var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid()))
+
+type dbBench struct {
+	b    *testing.B
+	stor storage.Storage
+	db   *DB
+
+	o  *opt.Options
+	ro *opt.ReadOptions
+	wo *opt.WriteOptions
+
+	keys, values [][]byte
+}
+
+func openDBBench(b *testing.B, noCompress bool) *dbBench {
+	_, err := os.Stat(benchDB)
+	if err == nil {
+		err = os.RemoveAll(benchDB)
+		if err != nil {
+			b.Fatal("cannot remove old db: ", err)
+		}
+	}
+
+	p := &dbBench{
+		b:  b,
+		o:  &opt.Options{},
+		ro: &opt.ReadOptions{},
+		wo: &opt.WriteOptions{},
+	}
+	p.stor, err = storage.OpenFile(benchDB)
+	if err != nil {
+		b.Fatal("cannot open stor: ", err)
+	}
+	if noCompress {
+		p.o.Compression = opt.NoCompression
+	}
+
+	p.db, err = Open(p.stor, p.o)
+	if err != nil {
+		b.Fatal("cannot open db: ", err)
+	}
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+	return p
+}
+
+func (p *dbBench) reopen() {
+	p.db.Close()
+	var err error
+	p.db, err = Open(p.stor, p.o)
+	if err != nil {
+		p.b.Fatal("Reopen: got error: ", err)
+	}
+}
+
+func (p *dbBench) populate(n int) {
+	p.keys, p.values = make([][]byte, n), make([][]byte, n)
+	v := newValueGen(0.5)
+	for i := range p.keys {
+		p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100)
+	}
+}
+
+func (p *dbBench) randomize() {
+	m := len(p.keys)
+	times := m * 2
+	r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface))
+	for n := 0; n < times; n++ {
+		i, j := r1.Int()%m, r2.Int()%m
+		if i == j {
+			continue
+		}
+		p.keys[i], p.keys[j] = p.keys[j], p.keys[i]
+		p.values[i], p.values[j] = p.values[j], p.values[i]
+	}
+}
+
+func (p *dbBench) writes(perBatch int) {
+	b := p.b
+	db := p.db
+
+	n := len(p.keys)
+	m := n / perBatch
+	if n%perBatch > 0 {
+		m++
+	}
+	batches := make([]Batch, m)
+	j := 0
+	for i := range batches {
+		first := true
+		for ; j < n && ((j+1)%perBatch != 0 || first); j++ {
+			first = false
+			batches[i].Put(p.keys[j], p.values[j])
+		}
+	}
+	runtime.GC()
+
+	b.ResetTimer()
+	b.StartTimer()
+	for i := range batches {
+		err := db.Write(&(batches[i]), p.wo)
+		if err != nil {
+			b.Fatal("write failed: ", err)
+		}
+	}
+	b.StopTimer()
+	b.SetBytes(116)
+}
+
+func (p *dbBench) gc() {
+	p.keys, p.values = nil, nil
+	runtime.GC()
+}
+
+func (p *dbBench) puts() {
+	b := p.b
+	db := p.db
+
+	b.ResetTimer()
+	b.StartTimer()
+	for i := range p.keys {
+		err := db.Put(p.keys[i], p.values[i], p.wo)
+		if err != nil {
+			b.Fatal("put failed: ", err)
+		}
+	}
+	b.StopTimer()
+	b.SetBytes(116)
+}
+
+func (p *dbBench) fill() {
+	b := p.b
+	db := p.db
+
+	perBatch := 10000
+	batch := new(Batch)
+	for i, n := 0, len(p.keys); i < n; {
+		first := true
+		for ; i < n && ((i+1)%perBatch != 0 || first); i++ {
+			first = false
+			batch.Put(p.keys[i], p.values[i])
+		}
+		err := db.Write(batch, p.wo)
+		if err != nil {
+			b.Fatal("write failed: ", err)
+		}
+		batch.Reset()
+	}
+}
+
+func (p *dbBench) gets() {
+	b := p.b
+	db := p.db
+
+	b.ResetTimer()
+	for i := range p.keys {
+		_, err := db.Get(p.keys[i], p.ro)
+		if err != nil {
+			b.Error("got error: ", err)
+		}
+	}
+	b.StopTimer()
+}
+
+func (p *dbBench) seeks() {
+	b := p.b
+
+	iter := p.newIter()
+	defer iter.Release()
+	b.ResetTimer()
+	for i := range p.keys {
+		if !iter.Seek(p.keys[i]) {
+			b.Error("value not found for: ", string(p.keys[i]))
+		}
+	}
+	b.StopTimer()
+}
+
+func (p *dbBench) newIter() iterator.Iterator {
+	iter := p.db.NewIterator(nil, p.ro)
+	err := iter.Error()
+	if err != nil {
+		p.b.Fatal("cannot create iterator: ", err)
+	}
+	return iter
+}
+
+func (p *dbBench) close() {
+	if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
+		p.b.Log("Block pool stats: ", bp)
+	}
+	p.db.Close()
+	p.stor.Close()
+	os.RemoveAll(benchDB)
+	p.db = nil
+	p.keys = nil
+	p.values = nil
+	runtime.GC()
+	runtime.GOMAXPROCS(1)
+}
+
+func BenchmarkDBWrite(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.writes(1)
+	p.close()
+}
+
+func BenchmarkDBWriteBatch(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.writes(1000)
+	p.close()
+}
+
+func BenchmarkDBWriteUncompressed(b *testing.B) {
+	p := openDBBench(b, true)
+	p.populate(b.N)
+	p.writes(1)
+	p.close()
+}
+
+func BenchmarkDBWriteBatchUncompressed(b *testing.B) {
+	p := openDBBench(b, true)
+	p.populate(b.N)
+	p.writes(1000)
+	p.close()
+}
+
+func BenchmarkDBWriteRandom(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.randomize()
+	p.writes(1)
+	p.close()
+}
+
+func BenchmarkDBWriteRandomSync(b *testing.B) {
+	p := openDBBench(b, false)
+	p.wo.Sync = true
+	p.populate(b.N)
+	p.writes(1)
+	p.close()
+}
+
+func BenchmarkDBOverwrite(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.writes(1)
+	p.writes(1)
+	p.close()
+}
+
+func BenchmarkDBOverwriteRandom(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.writes(1)
+	p.randomize()
+	p.writes(1)
+	p.close()
+}
+
+func BenchmarkDBPut(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.puts()
+	p.close()
+}
+
+func BenchmarkDBRead(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.gc()
+
+	iter := p.newIter()
+	b.ResetTimer()
+	for iter.Next() {
+	}
+	iter.Release()
+	b.StopTimer()
+	b.SetBytes(116)
+	p.close()
+}
+
+func BenchmarkDBReadGC(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+
+	iter := p.newIter()
+	b.ResetTimer()
+	for iter.Next() {
+	}
+	iter.Release()
+	b.StopTimer()
+	b.SetBytes(116)
+	p.close()
+}
+
+func BenchmarkDBReadUncompressed(b *testing.B) {
+	p := openDBBench(b, true)
+	p.populate(b.N)
+	p.fill()
+	p.gc()
+
+	iter := p.newIter()
+	b.ResetTimer()
+	for iter.Next() {
+	}
+	iter.Release()
+	b.StopTimer()
+	b.SetBytes(116)
+	p.close()
+}
+
+func BenchmarkDBReadTable(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.reopen()
+	p.gc()
+
+	iter := p.newIter()
+	b.ResetTimer()
+	for iter.Next() {
+	}
+	iter.Release()
+	b.StopTimer()
+	b.SetBytes(116)
+	p.close()
+}
+
+func BenchmarkDBReadReverse(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.gc()
+
+	iter := p.newIter()
+	b.ResetTimer()
+	iter.Last()
+	for iter.Prev() {
+	}
+	iter.Release()
+	b.StopTimer()
+	b.SetBytes(116)
+	p.close()
+}
+
+func BenchmarkDBReadReverseTable(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.reopen()
+	p.gc()
+
+	iter := p.newIter()
+	b.ResetTimer()
+	iter.Last()
+	for iter.Prev() {
+	}
+	iter.Release()
+	b.StopTimer()
+	b.SetBytes(116)
+	p.close()
+}
+
+func BenchmarkDBSeek(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.seeks()
+	p.close()
+}
+
+func BenchmarkDBSeekRandom(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.randomize()
+	p.seeks()
+	p.close()
+}
+
+func BenchmarkDBGet(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.gets()
+	p.close()
+}
+
+func BenchmarkDBGetRandom(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.randomize()
+	p.gets()
+	p.close()
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
new file mode 100644
index 0000000..175e222
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build go1.3
+
+package cache
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+)
+
+func BenchmarkLRUCache(b *testing.B) {
+	c := NewCache(NewLRU(10000))
+
+	b.SetParallelism(10)
+	b.RunParallel(func(pb *testing.PB) {
+		r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+		for pb.Next() {
+			key := uint64(r.Intn(1000000))
+			c.Get(0, key, func() (int, Value) {
+				return 1, key
+			}).Release()
+		}
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
new file mode 100644
index 0000000..f7b42e2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -0,0 +1,676 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package cache provides interface and implementation of a cache algorithms.
+package cache
+
+import (
+	"sync"
+	"sync/atomic"
+	"unsafe"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Cacher provides interface to implements a caching functionality.
+// An implementation must be goroutine-safe.
+type Cacher interface {
+	// Capacity returns cache capacity.
+	Capacity() int
+
+	// SetCapacity sets cache capacity.
+	SetCapacity(capacity int)
+
+	// Promote promotes the 'cache node'.
+	Promote(n *Node)
+
+	// Ban evicts the 'cache node' and prevent subsequent 'promote'.
+	Ban(n *Node)
+
+	// Evict evicts the 'cache node'.
+	Evict(n *Node)
+
+	// EvictNS evicts 'cache node' with the given namespace.
+	EvictNS(ns uint64)
+
+	// EvictAll evicts all 'cache node'.
+	EvictAll()
+
+	// Close closes the 'cache tree'
+	Close() error
+}
+
+// Value is a 'cacheable object'. It may implements util.Releaser, if
+// so the the Release method will be called once object is released.
+type Value interface{}
+
+type CacheGetter struct {
+	Cache *Cache
+	NS    uint64
+}
+
+func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+	return g.Cache.Get(g.NS, key, setFunc)
+}
+
+// The hash tables implementation is based on:
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
+
+const (
+	mInitialSize           = 1 << 4
+	mOverflowThreshold     = 1 << 5
+	mOverflowGrowThreshold = 1 << 7
+)
+
+type mBucket struct {
+	mu     sync.Mutex
+	node   []*Node
+	frozen bool
+}
+
+func (b *mBucket) freeze() []*Node {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	// Mark the bucket frozen so concurrent get/delete calls bail out and
+	// retry against the new hash-table head. The unconditional store under
+	// the held mutex is equivalent to the original "if !b.frozen" guard.
+	b.frozen = true
+	return b.node
+}
+
+func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
+	b.mu.Lock()
+
+	if b.frozen {
+		b.mu.Unlock()
+		return
+	}
+
+	// Scan the node.
+	for _, n := range b.node {
+		if n.hash == hash && n.ns == ns && n.key == key {
+			atomic.AddInt32(&n.ref, 1)
+			b.mu.Unlock()
+			return true, false, n
+		}
+	}
+
+	// Get only.
+	if noset {
+		b.mu.Unlock()
+		return true, false, nil
+	}
+
+	// Create node.
+	n = &Node{
+		r:    r,
+		hash: hash,
+		ns:   ns,
+		key:  key,
+		ref:  1,
+	}
+	// Add node to bucket.
+	b.node = append(b.node, n)
+	bLen := len(b.node)
+	b.mu.Unlock()
+
+	// Update counter.
+	grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
+	if bLen > mOverflowThreshold {
+		grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
+	}
+
+	// Grow.
+	if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+		nhLen := len(h.buckets) << 1
+		nh := &mNode{
+			buckets:         make([]unsafe.Pointer, nhLen),
+			mask:            uint32(nhLen) - 1,
+			pred:            unsafe.Pointer(h),
+			growThreshold:   int32(nhLen * mOverflowThreshold),
+			shrinkThreshold: int32(nhLen >> 1),
+		}
+		ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+		if !ok {
+			panic("BUG: failed swapping head")
+		}
+		go nh.initBuckets()
+	}
+
+	return true, true, n
+}
+
+func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
+	b.mu.Lock()
+
+	if b.frozen {
+		b.mu.Unlock()
+		return
+	}
+
+	// Scan the node.
+	var (
+		n    *Node
+		bLen int
+	)
+	for i := range b.node {
+		n = b.node[i]
+		if n.ns == ns && n.key == key {
+			if atomic.LoadInt32(&n.ref) == 0 {
+				deleted = true
+
+				// Call releaser.
+				if n.value != nil {
+					if r, ok := n.value.(util.Releaser); ok {
+						r.Release()
+					}
+					n.value = nil
+				}
+
+				// Remove node from bucket.
+				b.node = append(b.node[:i], b.node[i+1:]...)
+				bLen = len(b.node)
+			}
+			break
+		}
+	}
+	b.mu.Unlock()
+
+	if deleted {
+		// Call OnDel.
+		for _, f := range n.onDel {
+			f()
+		}
+
+		// Update counter.
+		atomic.AddInt32(&r.size, int32(n.size)*-1)
+		shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
+		if bLen >= mOverflowThreshold {
+			atomic.AddInt32(&h.overflow, -1)
+		}
+
+		// Shrink.
+		if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+			nhLen := len(h.buckets) >> 1
+			nh := &mNode{
+				buckets:         make([]unsafe.Pointer, nhLen),
+				mask:            uint32(nhLen) - 1,
+				pred:            unsafe.Pointer(h),
+				growThreshold:   int32(nhLen * mOverflowThreshold),
+				shrinkThreshold: int32(nhLen >> 1),
+			}
+			ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+			if !ok {
+				panic("BUG: failed swapping head")
+			}
+			go nh.initBuckets()
+		}
+	}
+
+	return true, deleted
+}
+
+type mNode struct {
+	buckets         []unsafe.Pointer // []*mBucket
+	mask            uint32
+	pred            unsafe.Pointer // *mNode
+	resizeInProgess int32
+
+	overflow        int32
+	growThreshold   int32
+	shrinkThreshold int32
+}
+
+func (n *mNode) initBucket(i uint32) *mBucket {
+	if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
+		return b
+	}
+
+	p := (*mNode)(atomic.LoadPointer(&n.pred))
+	if p != nil {
+		var node []*Node
+		if n.mask > p.mask {
+			// Grow.
+			pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
+			if pb == nil {
+				pb = p.initBucket(i & p.mask)
+			}
+			m := pb.freeze()
+			// Split nodes.
+			for _, x := range m {
+				if x.hash&n.mask == i {
+					node = append(node, x)
+				}
+			}
+		} else {
+			// Shrink.
+			pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
+			if pb0 == nil {
+				pb0 = p.initBucket(i)
+			}
+			pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
+			if pb1 == nil {
+				pb1 = p.initBucket(i + uint32(len(n.buckets)))
+			}
+			m0 := pb0.freeze()
+			m1 := pb1.freeze()
+			// Merge nodes.
+			node = make([]*Node, 0, len(m0)+len(m1))
+			node = append(node, m0...)
+			node = append(node, m1...)
+		}
+		b := &mBucket{node: node}
+		if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
+			if len(node) > mOverflowThreshold {
+				atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
+			}
+			return b
+		}
+	}
+
+	return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
+}
+
+func (n *mNode) initBuckets() {
+	for i := range n.buckets {
+		n.initBucket(uint32(i))
+	}
+	atomic.StorePointer(&n.pred, nil)
+}
+
+// Cache is a 'cache map'.
+type Cache struct {
+	mu     sync.RWMutex
+	mHead  unsafe.Pointer // *mNode
+	nodes  int32
+	size   int32
+	cacher Cacher
+	closed bool
+}
+
+// NewCache creates a new 'cache map'. The cacher is optional and
+// may be nil.
+func NewCache(cacher Cacher) *Cache {
+	h := &mNode{
+		buckets:         make([]unsafe.Pointer, mInitialSize),
+		mask:            mInitialSize - 1,
+		growThreshold:   int32(mInitialSize * mOverflowThreshold),
+		shrinkThreshold: 0,
+	}
+	for i := range h.buckets {
+		h.buckets[i] = unsafe.Pointer(&mBucket{})
+	}
+	r := &Cache{
+		mHead:  unsafe.Pointer(h),
+		cacher: cacher,
+	}
+	return r
+}
+
+func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
+	h := (*mNode)(atomic.LoadPointer(&r.mHead))
+	i := hash & h.mask
+	b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
+	if b == nil {
+		b = h.initBucket(i)
+	}
+	return h, b
+}
+
+func (r *Cache) delete(n *Node) bool {
+	// Retry until the bucket reports the operation as done; b.delete
+	// returns done=false only when the bucket was frozen by a concurrent
+	// resize, in which case the lookup is repeated on the new table.
+	// The original trailing "return false" after this loop was
+	// unreachable (go vet: unreachable code) and has been removed.
+	for {
+		h, b := r.getBucket(n.hash)
+		if done, deleted := b.delete(r, h, n.hash, n.ns, n.key); done {
+			return deleted
+		}
+	}
+}
+
+// Nodes returns number of 'cache node' in the map.
+func (r *Cache) Nodes() int {
+	return int(atomic.LoadInt32(&r.nodes))
+}
+
+// Size returns sums of 'cache node' size in the map.
+func (r *Cache) Size() int {
+	return int(atomic.LoadInt32(&r.size))
+}
+
+// Capacity returns cache capacity.
+func (r *Cache) Capacity() int {
+	if r.cacher == nil {
+		return 0
+	}
+	return r.cacher.Capacity()
+}
+
+// SetCapacity sets cache capacity.
+func (r *Cache) SetCapacity(capacity int) {
+	if r.cacher != nil {
+		r.cacher.SetCapacity(capacity)
+	}
+}
+
+// Get gets 'cache node' with the given namespace and key.
+// If cache node is not found and setFunc is not nil, Get will atomically creates
+// the 'cache node' by calling setFunc. Otherwise Get will returns nil.
+//
+// The returned 'cache handle' should be released after use by calling Release
+// method.
+func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
+		return nil
+	}
+
+	hash := murmur32(ns, key, 0xf00)
+	for {
+		h, b := r.getBucket(hash)
+		done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
+		if done {
+			if n != nil {
+				n.mu.Lock()
+				if n.value == nil {
+					if setFunc == nil {
+						n.mu.Unlock()
+						n.unref()
+						return nil
+					}
+
+					n.size, n.value = setFunc()
+					if n.value == nil {
+						n.size = 0
+						n.mu.Unlock()
+						n.unref()
+						return nil
+					}
+					atomic.AddInt32(&r.size, int32(n.size))
+				}
+				n.mu.Unlock()
+				if r.cacher != nil {
+					r.cacher.Promote(n)
+				}
+				return &Handle{unsafe.Pointer(n)}
+			}
+
+			break
+		}
+	}
+	return nil
+}
+
+// Delete removes and ban 'cache node' with the given namespace and key.
+// A banned 'cache node' will never inserted into the 'cache tree'. Ban
+// only attributed to the particular 'cache node', so when a 'cache node'
+// is recreated it will not be banned.
+//
+// If onDel is not nil, then it will be executed if such 'cache node'
+// doesn't exist or once the 'cache node' is released.
+//
+// Delete return true is such 'cache node' exist.
+func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
+		return false
+	}
+
+	hash := murmur32(ns, key, 0xf00)
+	for {
+		h, b := r.getBucket(hash)
+		done, _, n := b.get(r, h, hash, ns, key, true)
+		if done {
+			if n != nil {
+				if onDel != nil {
+					n.mu.Lock()
+					n.onDel = append(n.onDel, onDel)
+					n.mu.Unlock()
+				}
+				if r.cacher != nil {
+					r.cacher.Ban(n)
+				}
+				n.unref()
+				return true
+			}
+
+			break
+		}
+	}
+
+	if onDel != nil {
+		onDel()
+	}
+
+	return false
+}
+
+// Evict evicts 'cache node' with the given namespace and key. This will
+// simply call Cacher.Evict.
+//
+// Evict return true is such 'cache node' exist.
+func (r *Cache) Evict(ns, key uint64) bool {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
+		return false
+	}
+
+	hash := murmur32(ns, key, 0xf00)
+	for {
+		h, b := r.getBucket(hash)
+		done, _, n := b.get(r, h, hash, ns, key, true)
+		if done {
+			if n != nil {
+				if r.cacher != nil {
+					r.cacher.Evict(n)
+				}
+				n.unref()
+				return true
+			}
+
+			break
+		}
+	}
+
+	return false
+}
+
+// EvictNS evicts 'cache node' with the given namespace. This will
+// simply call Cacher.EvictNS.
+func (r *Cache) EvictNS(ns uint64) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
+		return
+	}
+
+	if r.cacher != nil {
+		r.cacher.EvictNS(ns)
+	}
+}
+
+// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
+func (r *Cache) EvictAll() {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
+		return
+	}
+
+	if r.cacher != nil {
+		r.cacher.EvictAll()
+	}
+}
+
+// Close closes the 'cache map' and releases all 'cache node'.
+// It is idempotent: calls after the first successful close are no-ops.
+func (r *Cache) Close() error {
+	r.mu.Lock()
+	// Release the mutex on every return path. The original code returned
+	// the Cacher.Close error while still holding r.mu, which left the
+	// cache permanently locked for all subsequent operations.
+	defer r.mu.Unlock()
+	if r.closed {
+		return nil
+	}
+	r.closed = true
+
+	if r.cacher != nil {
+		if err := r.cacher.Close(); err != nil {
+			return err
+		}
+	}
+
+	// Finalize any pending bucket initialization so every node is
+	// reachable through the current head, then release them all.
+	h := (*mNode)(r.mHead)
+	h.initBuckets()
+
+	for i := range h.buckets {
+		b := (*mBucket)(h.buckets[i])
+		for _, n := range b.node {
+			// Call releaser, if the cached value implements util.Releaser.
+			if n.value != nil {
+				if r, ok := n.value.(util.Releaser); ok {
+					r.Release()
+				}
+				n.value = nil
+			}
+
+			// Run the delete callbacks registered via Delete.
+			for _, f := range n.onDel {
+				f()
+			}
+		}
+	}
+	return nil
+}
+
+// Node is a 'cache node'.
+type Node struct {
+	r *Cache
+
+	hash    uint32
+	ns, key uint64
+
+	mu    sync.Mutex
+	size  int
+	value Value
+
+	ref   int32
+	onDel []func()
+
+	CacheData unsafe.Pointer
+}
+
+// NS returns this 'cache node' namespace.
+func (n *Node) NS() uint64 {
+	return n.ns
+}
+
+// Key returns this 'cache node' key.
+func (n *Node) Key() uint64 {
+	return n.key
+}
+
+// Size returns this 'cache node' size.
+func (n *Node) Size() int {
+	return n.size
+}
+
+// Value returns this 'cache node' value.
+func (n *Node) Value() Value {
+	return n.value
+}
+
+// Ref returns this 'cache node' ref counter.
+func (n *Node) Ref() int32 {
+	return atomic.LoadInt32(&n.ref)
+}
+
+// GetHandle returns an handle for this 'cache node'.
+func (n *Node) GetHandle() *Handle {
+	if atomic.AddInt32(&n.ref, 1) <= 1 {
+		panic("BUG: Node.GetHandle on zero ref")
+	}
+	return &Handle{unsafe.Pointer(n)}
+}
+
+func (n *Node) unref() {
+	if atomic.AddInt32(&n.ref, -1) == 0 {
+		n.r.delete(n)
+	}
+}
+
+func (n *Node) unrefLocked() {
+	if atomic.AddInt32(&n.ref, -1) == 0 {
+		n.r.mu.RLock()
+		if !n.r.closed {
+			n.r.delete(n)
+		}
+		n.r.mu.RUnlock()
+	}
+}
+
+type Handle struct {
+	n unsafe.Pointer // *Node
+}
+
+func (h *Handle) Value() Value {
+	n := (*Node)(atomic.LoadPointer(&h.n))
+	if n != nil {
+		return n.value
+	}
+	return nil
+}
+
+func (h *Handle) Release() {
+	nPtr := atomic.LoadPointer(&h.n)
+	if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
+		n := (*Node)(nPtr)
+		n.unrefLocked()
+	}
+}
+
+func murmur32(ns, key uint64, seed uint32) uint32 {
+	const (
+		m = uint32(0x5bd1e995)
+		r = 24
+	)
+
+	k1 := uint32(ns >> 32)
+	k2 := uint32(ns)
+	k3 := uint32(key >> 32)
+	k4 := uint32(key)
+
+	k1 *= m
+	k1 ^= k1 >> r
+	k1 *= m
+
+	k2 *= m
+	k2 ^= k2 >> r
+	k2 *= m
+
+	k3 *= m
+	k3 ^= k3 >> r
+	k3 *= m
+
+	k4 *= m
+	k4 ^= k4 >> r
+	k4 *= m
+
+	h := seed
+
+	h *= m
+	h ^= k1
+	h *= m
+	h ^= k2
+	h *= m
+	h ^= k3
+	h *= m
+	h ^= k4
+
+	h ^= h >> 13
+	h *= m
+	h ^= h >> 15
+
+	return h
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
new file mode 100644
index 0000000..c2a5015
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
@@ -0,0 +1,554 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+	"math/rand"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"testing"
+	"time"
+	"unsafe"
+)
+
+type int32o int32
+
+func (o *int32o) acquire() {
+	if atomic.AddInt32((*int32)(o), 1) != 1 {
+		panic("BUG: invalid ref")
+	}
+}
+
+func (o *int32o) Release() {
+	if atomic.AddInt32((*int32)(o), -1) != 0 {
+		panic("BUG: invalid ref")
+	}
+}
+
+type releaserFunc struct {
+	fn    func()
+	value Value
+}
+
+func (r releaserFunc) Release() {
+	if r.fn != nil {
+		r.fn()
+	}
+}
+
+func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
+	return c.Get(ns, key, func() (int, Value) {
+		if relf != nil {
+			return charge, releaserFunc{relf, value}
+		} else {
+			return charge, value
+		}
+	})
+}
+
+func TestCacheMap(t *testing.T) {
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	nsx := []struct {
+		nobjects, nhandles, concurrent, repeat int
+	}{
+		{10000, 400, 50, 3},
+		{100000, 1000, 100, 10},
+	}
+
+	var (
+		objects [][]int32o
+		handles [][]unsafe.Pointer
+	)
+
+	for _, x := range nsx {
+		objects = append(objects, make([]int32o, x.nobjects))
+		handles = append(handles, make([]unsafe.Pointer, x.nhandles))
+	}
+
+	c := NewCache(nil)
+
+	wg := new(sync.WaitGroup)
+	var done int32
+
+	for ns, x := range nsx {
+		for i := 0; i < x.concurrent; i++ {
+			wg.Add(1)
+			go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
+				defer wg.Done()
+				r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+				for j := len(objects) * repeat; j >= 0; j-- {
+					key := uint64(r.Intn(len(objects)))
+					h := c.Get(uint64(ns), key, func() (int, Value) {
+						o := &objects[key]
+						o.acquire()
+						return 1, o
+					})
+					if v := h.Value().(*int32o); v != &objects[key] {
+						t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
+					}
+					if objects[key] != 1 {
+						t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
+					}
+					if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
+						h.Release()
+					}
+				}
+			}(ns, i, x.repeat, objects[ns], handles[ns])
+		}
+
+		go func(handles []unsafe.Pointer) {
+			r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+			for atomic.LoadInt32(&done) == 0 {
+				i := r.Intn(len(handles))
+				h := (*Handle)(atomic.LoadPointer(&handles[i]))
+				if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
+					h.Release()
+				}
+				time.Sleep(time.Millisecond)
+			}
+		}(handles[ns])
+	}
+
+	go func() {
+		handles := make([]*Handle, 100000)
+		for atomic.LoadInt32(&done) == 0 {
+			for i := range handles {
+				handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
+					return 1, 1
+				})
+			}
+			for _, h := range handles {
+				h.Release()
+			}
+		}
+	}()
+
+	wg.Wait()
+
+	atomic.StoreInt32(&done, 1)
+
+	for _, handles0 := range handles {
+		for i := range handles0 {
+			h := (*Handle)(atomic.LoadPointer(&handles0[i]))
+			if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
+				h.Release()
+			}
+		}
+	}
+
+	for ns, objects0 := range objects {
+		for i, o := range objects0 {
+			if o != 0 {
+				t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
+			}
+		}
+	}
+}
+
+func TestCacheMap_NodesAndSize(t *testing.T) {
+	c := NewCache(nil)
+	if c.Nodes() != 0 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
+	}
+	if c.Size() != 0 {
+		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
+	}
+	set(c, 0, 1, 1, 1, nil)
+	set(c, 0, 2, 2, 2, nil)
+	set(c, 1, 1, 3, 3, nil)
+	set(c, 2, 1, 4, 1, nil)
+	if c.Nodes() != 4 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
+	}
+	if c.Size() != 7 {
+		t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
+	}
+}
+
+func TestLRUCache_Capacity(t *testing.T) {
+	c := NewCache(NewLRU(10))
+	if c.Capacity() != 10 {
+		t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
+	}
+	set(c, 0, 1, 1, 1, nil).Release()
+	set(c, 0, 2, 2, 2, nil).Release()
+	set(c, 1, 1, 3, 3, nil).Release()
+	set(c, 2, 1, 4, 1, nil).Release()
+	set(c, 2, 2, 5, 1, nil).Release()
+	set(c, 2, 3, 6, 1, nil).Release()
+	set(c, 2, 4, 7, 1, nil).Release()
+	set(c, 2, 5, 8, 1, nil).Release()
+	if c.Nodes() != 7 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
+	}
+	if c.Size() != 10 {
+		t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
+	}
+	c.SetCapacity(9)
+	if c.Capacity() != 9 {
+		t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
+	}
+	if c.Nodes() != 6 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
+	}
+	if c.Size() != 8 {
+		t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
+	}
+}
+
+func TestCacheMap_NilValue(t *testing.T) {
+	c := NewCache(NewLRU(10))
+	h := c.Get(0, 0, func() (size int, value Value) {
+		return 1, nil
+	})
+	if h != nil {
+		t.Error("cache handle is non-nil")
+	}
+	if c.Nodes() != 0 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
+	}
+	if c.Size() != 0 {
+		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
+	}
+}
+
+// TestLRUCache_GetLatency runs concurrent slow setters against
+// concurrent getters and fails if the average Get latency exceeds a
+// third of the artificial set delay, i.e. if setters block getters.
+func TestLRUCache_GetLatency(t *testing.T) {
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	const (
+		concurrentSet = 30
+		concurrentGet = 3
+		duration      = 3 * time.Second
+		delay         = 3 * time.Millisecond
+		maxkey        = 100000
+	)
+
+	var (
+		set, getHit, getAll        int32
+		getMaxLatency, getDuration int64
+	)
+
+	c := NewCache(NewLRU(5000))
+	wg := &sync.WaitGroup{}
+	until := time.Now().Add(duration)
+	for i := 0; i < concurrentSet; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			r := rand.New(rand.NewSource(time.Now().UnixNano()))
+			for time.Now().Before(until) {
+				c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
+					time.Sleep(delay) // simulate an expensive value build
+					atomic.AddInt32(&set, 1)
+					return 1, 1
+				}).Release()
+			}
+		}(i)
+	}
+	for i := 0; i < concurrentGet; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			r := rand.New(rand.NewSource(time.Now().UnixNano()))
+			for {
+				mark := time.Now()
+				if mark.Before(until) {
+					h := c.Get(0, uint64(r.Intn(maxkey)), nil)
+					latency := int64(time.Now().Sub(mark))
+					m := atomic.LoadInt64(&getMaxLatency)
+					if latency > m {
+						// Best-effort max update; losing the CAS race is acceptable.
+						atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
+					}
+					atomic.AddInt64(&getDuration, latency)
+					if h != nil {
+						atomic.AddInt32(&getHit, 1)
+						h.Release()
+					}
+					atomic.AddInt32(&getAll, 1)
+				} else {
+					break
+				}
+			}
+		}(i)
+	}
+
+	wg.Wait()
+	getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
+	t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
+		set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
+
+	if getAvglatency > delay/3 {
+		t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
+	}
+}
+
+// TestLRUCache_HitMiss inserts the cases one by one, verifying after
+// each insert that exactly the already-inserted keys hit; it then
+// deletes them one by one, verifying the delete finalizer runs and the
+// remaining keys still hit. Finally it checks every set finalizer fired.
+func TestLRUCache_HitMiss(t *testing.T) {
+	cases := []struct {
+		key   uint64
+		value string
+	}{
+		{1, "vvvvvvvvv"},
+		{100, "v1"},
+		{0, "v2"},
+		{12346, "v3"},
+		{777, "v4"},
+		{999, "v5"},
+		{7654, "v6"},
+		{2, "v7"},
+		{3, "v8"},
+		{9, "v9"},
+	}
+
+	setfin := 0
+	c := NewCache(NewLRU(1000))
+	for i, x := range cases {
+		set(c, 0, x.key, x.value, len(x.value), func() {
+			setfin++
+		}).Release()
+		for j, y := range cases {
+			h := c.Get(0, y.key, nil)
+			if j <= i {
+				// should hit
+				if h == nil {
+					t.Errorf("case '%d' iteration '%d' is miss", i, j)
+				} else {
+					if x := h.Value().(releaserFunc).value.(string); x != y.value {
+						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
+					}
+				}
+			} else {
+				// should miss
+				if h != nil {
+					t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
+				}
+			}
+			if h != nil {
+				h.Release()
+			}
+		}
+	}
+
+	for i, x := range cases {
+		finalizerOk := false
+		c.Delete(0, x.key, func() {
+			finalizerOk = true
+		})
+
+		if !finalizerOk {
+			t.Errorf("case %d delete finalizer not executed", i)
+		}
+
+		for j, y := range cases {
+			h := c.Get(0, y.key, nil)
+			if j > i {
+				// should hit
+				if h == nil {
+					t.Errorf("case '%d' iteration '%d' is miss", i, j)
+				} else {
+					if x := h.Value().(releaserFunc).value.(string); x != y.value {
+						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
+					}
+				}
+			} else {
+				// should miss
+				if h != nil {
+					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
+				}
+			}
+			if h != nil {
+				h.Release()
+			}
+		}
+	}
+
+	if setfin != len(cases) {
+		t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
+	}
+}
+
+// TestLRUCache_Eviction exercises LRU eviction with an oversized entry
+// (key 9, size 10) and a still-held handle (o1 for key 1): held entries
+// survive eviction until released, then become evictable.
+func TestLRUCache_Eviction(t *testing.T) {
+	c := NewCache(NewLRU(12))
+	o1 := set(c, 0, 1, 1, 1, nil) // handle kept alive; pins the entry
+	set(c, 0, 2, 2, 1, nil).Release()
+	set(c, 0, 3, 3, 1, nil).Release()
+	set(c, 0, 4, 4, 1, nil).Release()
+	set(c, 0, 5, 5, 1, nil).Release()
+	if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
+		h.Release()
+	}
+	set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
+
+	for _, key := range []uint64{9, 2, 5, 1} {
+		h := c.Get(0, key, nil)
+		if h == nil {
+			t.Errorf("miss for key '%d'", key)
+		} else {
+			if x := h.Value().(int); x != int(key) {
+				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
+			}
+			h.Release()
+		}
+	}
+	o1.Release()
+	for _, key := range []uint64{1, 2, 5} {
+		h := c.Get(0, key, nil)
+		if h == nil {
+			t.Errorf("miss for key '%d'", key)
+		} else {
+			if x := h.Value().(int); x != int(key) {
+				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
+			}
+			h.Release()
+		}
+	}
+	// Keys 3, 4 and 9 must have been evicted by now.
+	for _, key := range []uint64{3, 4, 9} {
+		h := c.Get(0, key, nil)
+		if h != nil {
+			t.Errorf("hit for key '%d'", key)
+			if x := h.Value().(int); x != int(key) {
+				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
+			}
+			h.Release()
+		}
+	}
+}
+
+// TestLRUCache_Evict covers the three explicit-eviction entry points:
+// Evict (single key), EvictNS (whole namespace) and EvictAll.
+func TestLRUCache_Evict(t *testing.T) {
+	c := NewCache(NewLRU(6))
+	set(c, 0, 1, 1, 1, nil).Release()
+	set(c, 0, 2, 2, 1, nil).Release()
+	set(c, 1, 1, 4, 1, nil).Release()
+	set(c, 1, 2, 5, 1, nil).Release()
+	set(c, 2, 1, 6, 1, nil).Release()
+	set(c, 2, 2, 7, 1, nil).Release()
+
+	// All six entries fit the capacity and must be retrievable.
+	for ns := 0; ns < 3; ns++ {
+		for key := 1; key < 3; key++ {
+			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
+				h.Release()
+			} else {
+				t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
+			}
+		}
+	}
+
+	// Evict is idempotent: the second call on the same key reports false.
+	if ok := c.Evict(0, 1); !ok {
+		t.Error("first Cache.Evict on #0.1 return false")
+	}
+	if ok := c.Evict(0, 1); ok {
+		t.Error("second Cache.Evict on #0.1 return true")
+	}
+	if h := c.Get(0, 1, nil); h != nil {
+		t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
+	}
+
+	c.EvictNS(1)
+	if h := c.Get(1, 1, nil); h != nil {
+		t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
+	}
+	if h := c.Get(1, 2, nil); h != nil {
+		t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
+	}
+
+	c.EvictAll()
+	for ns := 0; ns < 3; ns++ {
+		for key := 1; key < 3; key++ {
+			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
+				t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
+			}
+		}
+	}
+}
+
+// TestLRUCache_Delete verifies Delete semantics, including deleting an
+// entry that is still referenced through a live handle (h2): the entry
+// stays retrievable until that handle is released, and the delete
+// finalizer is invoked the expected number of times.
+func TestLRUCache_Delete(t *testing.T) {
+	delFuncCalled := 0
+	delFunc := func() {
+		delFuncCalled++
+	}
+
+	c := NewCache(NewLRU(2))
+	set(c, 0, 1, 1, 1, nil).Release()
+	set(c, 0, 2, 2, 1, nil).Release()
+
+	if ok := c.Delete(0, 1, delFunc); !ok {
+		t.Error("Cache.Delete on #1 return false")
+	}
+	if h := c.Get(0, 1, nil); h != nil {
+		t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
+	}
+	if ok := c.Delete(0, 1, delFunc); ok {
+		t.Error("Cache.Delete on #1 return true")
+	}
+
+	// Hold a handle on #2 across the deletes below.
+	h2 := c.Get(0, 2, nil)
+	if h2 == nil {
+		t.Error("Cache.Get on #2 return nil")
+	}
+	if ok := c.Delete(0, 2, delFunc); !ok {
+		t.Error("(1) Cache.Delete on #2 return false")
+	}
+	if ok := c.Delete(0, 2, delFunc); !ok {
+		t.Error("(2) Cache.Delete on #2 return false")
+	}
+
+	set(c, 0, 3, 3, 1, nil).Release()
+	set(c, 0, 4, 4, 1, nil).Release()
+	c.Get(0, 2, nil).Release()
+
+	for key := 2; key <= 4; key++ {
+		if h := c.Get(0, uint64(key), nil); h != nil {
+			h.Release()
+		} else {
+			t.Errorf("Cache.Get on #%d return nil", key)
+		}
+	}
+
+	// Releasing the last handle finally removes #2.
+	h2.Release()
+	if h := c.Get(0, 2, nil); h != nil {
+		t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
+	}
+
+	if delFuncCalled != 4 {
+		t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
+	}
+}
+
+// TestLRUCache_Close checks that Close runs the release finalizer of
+// every resident entry and that a pending delete finalizer also fires.
+func TestLRUCache_Close(t *testing.T) {
+	relFuncCalled := 0
+	relFunc := func() {
+		relFuncCalled++
+	}
+	delFuncCalled := 0
+	delFunc := func() {
+		delFuncCalled++
+	}
+
+	c := NewCache(NewLRU(2))
+	set(c, 0, 1, 1, 1, relFunc).Release()
+	set(c, 0, 2, 2, 1, relFunc).Release()
+
+	// Keep a live handle on #3 so Delete cannot drop it immediately.
+	h3 := set(c, 0, 3, 3, 1, relFunc)
+	if h3 == nil {
+		t.Error("Cache.Get on #3 return nil")
+	}
+	if ok := c.Delete(0, 3, delFunc); !ok {
+		t.Error("Cache.Delete on #3 return false")
+	}
+
+	c.Close()
+
+	if relFuncCalled != 3 {
+		t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
+	}
+	if delFuncCalled != 1 {
+		t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
new file mode 100644
index 0000000..d9a84cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
@@ -0,0 +1,195 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+	"sync"
+	"unsafe"
+)
+
+// lruNode is an element of the lru recency ring. While resident it
+// pins the cache node n through handle h so the node cannot be freed.
+type lruNode struct {
+	n   *Node
+	h   *Handle
+	ban bool // banned nodes are kept out of the recency accounting
+
+	next, prev *lruNode
+}
+
+// insert links n into the circular list immediately after at.
+func (n *lruNode) insert(at *lruNode) {
+	x := at.next
+	at.next = n
+	n.prev = at
+	n.next = x
+	x.prev = n
+}
+
+// remove unlinks n from the list; panics if n was already removed.
+func (n *lruNode) remove() {
+	if n.prev != nil {
+		n.prev.next = n.next
+		n.next.prev = n.prev
+		n.prev = nil
+		n.next = nil
+	} else {
+		panic("BUG: removing removed node")
+	}
+}
+
+// lru implements the Cacher interface with least-recently-used
+// eviction. recent is the sentinel of a circular doubly-linked list;
+// recent.next is the most recently used node, recent.prev the least.
+type lru struct {
+	mu       sync.Mutex
+	capacity int
+	used     int
+	recent   lruNode
+}
+
+// reset re-initializes the sentinel ring and clears the usage counter.
+func (r *lru) reset() {
+	r.recent.next = &r.recent
+	r.recent.prev = &r.recent
+	r.used = 0
+}
+
+// Capacity returns the current cache capacity.
+func (r *lru) Capacity() int {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return r.capacity
+}
+
+// SetCapacity changes the capacity and evicts least-recently-used
+// nodes until usage fits. Handles are released after the mutex is
+// dropped to avoid re-entering the cache while holding the lock.
+func (r *lru) SetCapacity(capacity int) {
+	var evicted []*lruNode
+
+	r.mu.Lock()
+	r.capacity = capacity
+	for r.used > r.capacity {
+		rn := r.recent.prev // least recently used
+		if rn == nil {
+			panic("BUG: invalid LRU used or capacity counter")
+		}
+		rn.remove()
+		rn.n.CacheData = nil
+		r.used -= rn.n.Size()
+		evicted = append(evicted, rn)
+	}
+	r.mu.Unlock()
+
+	for _, rn := range evicted {
+		rn.h.Release()
+	}
+}
+
+// Promote marks n as most recently used. A node not yet tracked is
+// admitted (if it fits the capacity at all) and may trigger evictions;
+// an already-tracked, non-banned node is just moved to the front.
+// Evicted handles are released outside the lock.
+func (r *lru) Promote(n *Node) {
+	var evicted []*lruNode
+
+	r.mu.Lock()
+	if n.CacheData == nil {
+		// Not tracked yet: admit only if it can fit at all.
+		if n.Size() <= r.capacity {
+			rn := &lruNode{n: n, h: n.GetHandle()}
+			rn.insert(&r.recent)
+			n.CacheData = unsafe.Pointer(rn)
+			r.used += n.Size()
+
+			for r.used > r.capacity {
+				rn := r.recent.prev
+				if rn == nil {
+					panic("BUG: invalid LRU used or capacity counter")
+				}
+				rn.remove()
+				rn.n.CacheData = nil
+				r.used -= rn.n.Size()
+				evicted = append(evicted, rn)
+			}
+		}
+	} else {
+		// Already tracked: refresh recency unless the node is banned.
+		rn := (*lruNode)(n.CacheData)
+		if !rn.ban {
+			rn.remove()
+			rn.insert(&r.recent)
+		}
+	}
+	r.mu.Unlock()
+
+	for _, rn := range evicted {
+		rn.h.Release()
+	}
+}
+
+// Ban excludes n from the LRU. An untracked node gets a banned marker
+// so a later Promote is a no-op; a tracked node is unlinked, its size
+// reclaimed, and its handle released (outside the lock).
+func (r *lru) Ban(n *Node) {
+	r.mu.Lock()
+	if n.CacheData == nil {
+		n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
+	} else {
+		rn := (*lruNode)(n.CacheData)
+		if !rn.ban {
+			rn.remove()
+			rn.ban = true
+			r.used -= rn.n.Size()
+			r.mu.Unlock()
+
+			rn.h.Release()
+			rn.h = nil
+			return
+		}
+	}
+	r.mu.Unlock()
+}
+
+// Evict drops n from the LRU if it is tracked and not banned; the
+// pinning handle is released after the mutex is dropped.
+// NOTE(review): unlike SetCapacity/Promote, this does not call
+// rn.remove() or adjust r.used here — presumably handled elsewhere;
+// verify against cache.go before relying on the counters.
+func (r *lru) Evict(n *Node) {
+	r.mu.Lock()
+	rn := (*lruNode)(n.CacheData)
+	if rn == nil || rn.ban {
+		r.mu.Unlock()
+		return
+	}
+	n.CacheData = nil
+	r.mu.Unlock()
+
+	rn.h.Release()
+}
+
+// EvictNS evicts every tracked node that belongs to namespace ns,
+// walking the ring from least to most recently used. Handles are
+// released after the mutex is dropped.
+func (r *lru) EvictNS(ns uint64) {
+	var evicted []*lruNode
+
+	r.mu.Lock()
+	for e := r.recent.prev; e != &r.recent; {
+		rn := e
+		e = e.prev // advance before rn is unlinked
+		if rn.n.NS() == ns {
+			rn.remove()
+			rn.n.CacheData = nil
+			r.used -= rn.n.Size()
+			evicted = append(evicted, rn)
+		}
+	}
+	r.mu.Unlock()
+
+	for _, rn := range evicted {
+		rn.h.Release()
+	}
+}
+
+// EvictAll empties the LRU. The list links stay intact after reset()
+// (only the sentinel is re-pointed), so the saved tail can still be
+// walked to release every handle outside the lock.
+func (r *lru) EvictAll() {
+	r.mu.Lock()
+	back := r.recent.prev
+	for rn := back; rn != &r.recent; rn = rn.prev {
+		rn.n.CacheData = nil
+	}
+	r.reset()
+	r.mu.Unlock()
+
+	for rn := back; rn != &r.recent; rn = rn.prev {
+		rn.h.Release()
+	}
+}
+
+// Close implements Cacher; the LRU holds no external resources.
+func (r *lru) Close() error {
+	return nil
+}
+
+// NewLRU create a new LRU-cache.
+func NewLRU(capacity int) Cacher {
+	r := &lru{capacity: capacity}
+	r.reset()
+	return r
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go
new file mode 100644
index 0000000..578a14c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import "git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+
+// iComparer orders internal keys (iKey) by wrapping a user-supplied
+// comparer for the user-key portion.
+type iComparer struct {
+	ucmp comparer.Comparer
+}
+
+// uName returns the wrapped user comparer's name.
+func (icmp *iComparer) uName() string {
+	return icmp.ucmp.Name()
+}
+
+// uCompare compares raw user keys with the wrapped comparer.
+func (icmp *iComparer) uCompare(a, b []byte) int {
+	return icmp.ucmp.Compare(a, b)
+}
+
+// uSeparator delegates Separator to the wrapped comparer.
+func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
+	return icmp.ucmp.Separator(dst, a, b)
+}
+
+// uSuccessor delegates Successor to the wrapped comparer.
+func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
+	return icmp.ucmp.Successor(dst, b)
+}
+
+// Name reports the user comparer's name, so on-disk compatibility is
+// keyed to the user comparer.
+func (icmp *iComparer) Name() string {
+	return icmp.uName()
+}
+
+// Compare orders internal keys: first by user key (ascending, per the
+// user comparer), then by the trailing number with larger values
+// first (m > n yields -1), i.e. newer entries sort before older ones.
+func (icmp *iComparer) Compare(a, b []byte) int {
+	x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
+	if x == 0 {
+		if m, n := iKey(a).num(), iKey(b).num(); m > n {
+			x = -1
+		} else if m < n {
+			x = 1
+		}
+	}
+	return x
+}
+
+// Separator builds a short internal key between a and b: the user
+// separator is extended either with kMaxNumBytes (when the user key
+// was shortened) or with a's original 8-byte number trailer.
+func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
+	ua, ub := iKey(a).ukey(), iKey(b).ukey()
+	dst = icmp.ucmp.Separator(dst, ua, ub)
+	if dst == nil {
+		return nil
+	}
+	if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
+		dst = append(dst, kMaxNumBytes...)
+	} else {
+		// Did not close possibilities that n maybe longer than len(ub).
+		dst = append(dst, a[len(a)-8:]...)
+	}
+	return dst
+}
+
+// Successor builds a short internal key >= b, mirroring Separator's
+// trailer handling for the shortened and non-shortened cases.
+func (icmp *iComparer) Successor(dst, b []byte) []byte {
+	ub := iKey(b).ukey()
+	dst = icmp.ucmp.Successor(dst, ub)
+	if dst == nil {
+		return nil
+	}
+	if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
+		dst = append(dst, kMaxNumBytes...)
+	} else {
+		// Did not close possibilities that n maybe longer than len(ub).
+		dst = append(dst, b[len(b)-8:]...)
+	}
+	return dst
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
new file mode 100644
index 0000000..14dddf8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package comparer
+
+import "bytes"
+
+// bytesComparer implements Comparer using the natural byte-wise order.
+type bytesComparer struct{}
+
+// Compare orders keys exactly as bytes.Compare does.
+func (bytesComparer) Compare(a, b []byte) int {
+	return bytes.Compare(a, b)
+}
+
+// Name matches the comparator name used by the C++ LevelDB
+// implementation, keeping databases interchangeable.
+func (bytesComparer) Name() string {
+	return "leveldb.BytewiseComparator"
+}
+
+// Separator returns a key x with a <= x < b that is shorter than a
+// when possible: the shared prefix plus one incremented byte. It
+// returns nil when no shortening applies (prefix relation, or the
+// diverging byte cannot be bumped below b's byte).
+func (bytesComparer) Separator(dst, a, b []byte) []byte {
+	i, n := 0, len(a)
+	if n > len(b) {
+		n = len(b)
+	}
+	// Skip the common prefix of a and b.
+	for ; i < n && a[i] == b[i]; i++ {
+	}
+	if i >= n {
+		// Do not shorten if one string is a prefix of the other
+	} else if c := a[i]; c < 0xff && c+1 < b[i] {
+		dst = append(dst, a[:i+1]...)
+		dst[i]++
+		return dst
+	}
+	return nil
+}
+
+// Successor returns a short key >= b by incrementing b's first
+// non-0xff byte; nil when b is all 0xff bytes (no finite successor
+// can be shortened).
+func (bytesComparer) Successor(dst, b []byte) []byte {
+	for i, c := range b {
+		if c != 0xff {
+			dst = append(dst, b[:i+1]...)
+			dst[i]++
+			return dst
+		}
+	}
+	return nil
+}
+
+// DefaultComparer are default implementation of the Comparer interface.
+// It uses the natural ordering, consistent with bytes.Compare.
+var DefaultComparer = bytesComparer{}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
new file mode 100644
index 0000000..14a28f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package comparer provides interface and implementation for ordering
+// sets of data.
+package comparer
+
+// BasicComparer is the interface that wraps the basic Compare method.
+type BasicComparer interface {
+	// Compare returns -1, 0, or +1 depending on whether a is 'less than',
+	// 'equal to' or 'greater than' b. The two arguments can only be 'equal'
+	// if their contents are exactly equal. Furthermore, the empty slice
+	// must be 'less than' any non-empty slice.
+	Compare(a, b []byte) int
+}
+
+// Comparer defines a total ordering over the space of []byte keys: a 'less
+// than' relationship.
+type Comparer interface {
+	BasicComparer
+
+	// Name returns name of the comparer.
+	//
+	// The Level-DB on-disk format stores the comparer name, and opening a
+	// database with a different comparer from the one it was created with
+	// will result in an error.
+	//
+	// An implementation should switch to a new name whenever the comparer
+	// implementation changes in a way that will cause the relative ordering
+	// of any two keys to change.
+	//
+	// Names starting with "leveldb." are reserved and should not be used
+	// by any users of this package.
+	Name() string
+
+	// Below are advanced functions used to reduce the space requirements
+	// for internal data structures such as index blocks.
+
+	// Separator appends a sequence of bytes x to dst such that a <= x && x < b,
+	// where 'less than' is consistent with Compare. An implementation should
+	// return nil if x equal to a.
+	//
+	// Either contents of a or b should not by any means be modified. Doing so
+	// may cause corruption on the internal state.
+	Separator(dst, a, b []byte) []byte
+
+	// Successor appends a sequence of bytes x to dst such that x >= b, where
+	// 'less than' is consistent with Compare. An implementation should return
+	// nil if x equal to b.
+	//
+	// Contents of b should not by any means be modified. Doing so may cause
+	// corruption on the internal state.
+	Successor(dst, b []byte) []byte
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
new file mode 100644
index 0000000..1d6f9cf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
@@ -0,0 +1,500 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bytes"
+	"fmt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"io"
+	"math/rand"
+	"testing"
+)
+
+// ctValSize is the value payload size used by the corruption tests.
+const ctValSize = 1000
+
+// dbCorruptHarness extends dbHarness with helpers that corrupt or
+// remove database files to test recovery behavior.
+type dbCorruptHarness struct {
+	dbHarness
+}
+
+// newDbCorruptHarnessWopt creates a harness using the given options.
+func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
+	h := new(dbCorruptHarness)
+	h.init(t, o)
+	return h
+}
+
+// newDbCorruptHarness creates a harness with a small block cache and
+// strict journal checksum checking.
+func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
+	return newDbCorruptHarnessWopt(t, &opt.Options{
+		BlockCacheCapacity: 100,
+		Strict:             opt.StrictJournalChecksum,
+	})
+}
+
+// recover re-opens the database through Recover, failing the test if
+// recovery does not succeed.
+func (h *dbCorruptHarness) recover() {
+	p := &h.dbHarness
+	t := p.t
+
+	var err error
+	p.db, err = Recover(h.stor, h.o)
+	if err != nil {
+		t.Fatal("Repair: got error: ", err)
+	}
+}
+
+// build writes n sequential key/value pairs, one batch per key.
+func (h *dbCorruptHarness) build(n int) {
+	p := &h.dbHarness
+	t := p.t
+	db := p.db
+
+	batch := new(Batch)
+	for i := 0; i < n; i++ {
+		batch.Reset()
+		batch.Put(tkey(i), tval(i, ctValSize))
+		err := db.Write(batch, p.wo)
+		if err != nil {
+			t.Fatal("write error: ", err)
+		}
+	}
+}
+
+// buildShuffled writes n key/value pairs intended to be in shuffled
+// order. NOTE(review): `for i := range rnd.Perm(n)` iterates the
+// indices 0..n-1 in order and discards the permutation values, so the
+// keys are actually written sequentially (only the RNG state is
+// consumed); `for _, i := range rnd.Perm(n)` was probably intended.
+// Left as-is to match upstream vendored code.
+func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) {
+	p := &h.dbHarness
+	t := p.t
+	db := p.db
+
+	batch := new(Batch)
+	for i := range rnd.Perm(n) {
+		batch.Reset()
+		batch.Put(tkey(i), tval(i, ctValSize))
+		err := db.Write(batch, p.wo)
+		if err != nil {
+			t.Fatal("write error: ", err)
+		}
+	}
+}
+
+// deleteRand issues n deletes of random keys drawn from [0, max).
+func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) {
+	p := &h.dbHarness
+	t := p.t
+	db := p.db
+
+	batch := new(Batch)
+	for i := 0; i < n; i++ {
+		batch.Reset()
+		batch.Delete(tkey(rnd.Intn(max)))
+		err := db.Write(batch, p.wo)
+		if err != nil {
+			t.Fatal("write error: ", err)
+		}
+	}
+}
+
+// corrupt flips the high bit of n bytes at offset in the fi-th file of
+// type ft (fi < 0 selects the last file; negative offset counts from
+// the end). The file is read fully, mutated in memory, then rewritten.
+func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
+	p := &h.dbHarness
+	t := p.t
+
+	ff, _ := p.stor.GetFiles(ft)
+	sff := files(ff)
+	sff.sort()
+	if fi < 0 {
+		fi = len(sff) - 1
+	}
+	if fi >= len(sff) {
+		t.Fatalf("no such file with type %q with index %d", ft, fi)
+	}
+
+	file := sff[fi]
+
+	r, err := file.Open()
+	if err != nil {
+		t.Fatal("cannot open file: ", err)
+	}
+	// Seek to the end to learn the file size, then rewind.
+	x, err := r.Seek(0, 2)
+	if err != nil {
+		t.Fatal("cannot query file size: ", err)
+	}
+	m := int(x)
+	if _, err := r.Seek(0, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	// Clamp offset and n to the actual file bounds.
+	if offset < 0 {
+		if -offset > m {
+			offset = 0
+		} else {
+			offset = m + offset
+		}
+	}
+	if offset > m {
+		offset = m
+	}
+	if offset+n > m {
+		n = m - offset
+	}
+
+	buf := make([]byte, m)
+	_, err = io.ReadFull(r, buf)
+	if err != nil {
+		t.Fatal("cannot read file: ", err)
+	}
+	r.Close()
+
+	for i := 0; i < n; i++ {
+		buf[offset+i] ^= 0x80
+	}
+
+	err = file.Remove()
+	if err != nil {
+		t.Fatal("cannot remove old file: ", err)
+	}
+	w, err := file.Create()
+	if err != nil {
+		t.Fatal("cannot create new file: ", err)
+	}
+	_, err = w.Write(buf)
+	if err != nil {
+		t.Fatal("cannot write new file: ", err)
+	}
+	w.Close()
+}
+
+// removeAll deletes every storage file of the given type.
+func (h *dbCorruptHarness) removeAll(ft storage.FileType) {
+	ff, err := h.stor.GetFiles(ft)
+	if err != nil {
+		h.t.Fatal("get files: ", err)
+	}
+	for _, f := range ff {
+		if err := f.Remove(); err != nil {
+			h.t.Error("remove file: ", err)
+		}
+	}
+}
+
+// removeOne deletes one randomly chosen storage file of the given type.
+func (h *dbCorruptHarness) removeOne(ft storage.FileType) {
+	ff, err := h.stor.GetFiles(ft)
+	if err != nil {
+		h.t.Fatal("get files: ", err)
+	}
+	f := ff[rand.Intn(len(ff))]
+	h.t.Logf("removing file @%d", f.Num())
+	if err := f.Remove(); err != nil {
+		h.t.Error("remove file: ", err)
+	}
+}
+
+// check iterates the whole database counting entries whose key parses
+// in order and whose value matches the expected payload; it fails
+// unless the number of good entries lies within [min, max].
+func (h *dbCorruptHarness) check(min, max int) {
+	p := &h.dbHarness
+	t := p.t
+	db := p.db
+
+	var n, badk, badv, missed, good int
+	iter := db.NewIterator(nil, p.ro)
+	for iter.Next() {
+		k := 0
+		fmt.Sscanf(string(iter.Key()), "%d", &k)
+		if k < n {
+			badk++
+			continue
+		}
+		missed += k - n // keys skipped since the last good one
+		n = k + 1
+		if !bytes.Equal(iter.Value(), tval(k, ctValSize)) {
+			badv++
+		} else {
+			good++
+		}
+	}
+	err := iter.Error()
+	iter.Release()
+	t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v",
+		min, max, good, badk, badv, missed, err)
+	if good < min || good > max {
+		t.Errorf("good entries number not in range")
+	}
+}
+
+// TestCorruptDB_Journal corrupts two spots of the journal and checks
+// that only the records before the first corruption survive reopen.
+func TestCorruptDB_Journal(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.build(100)
+	h.check(100, 100)
+	h.closeDB()
+	h.corrupt(storage.TypeJournal, -1, 19, 1)
+	h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1)
+
+	h.openDB()
+	h.check(36, 36)
+
+	h.close()
+}
+
+// TestCorruptDB_Table corrupts one byte of a compacted table and
+// expects exactly one entry to be lost on reopen.
+func TestCorruptDB_Table(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.build(100)
+	h.compactMem()
+	h.compactRangeAt(0, "", "")
+	h.compactRangeAt(1, "", "")
+	h.closeDB()
+	h.corrupt(storage.TypeTable, -1, 100, 1)
+
+	h.openDB()
+	h.check(99, 99)
+
+	h.close()
+}
+
+// TestCorruptDB_TableIndex corrupts the table's trailing index region
+// and accepts a wide range of surviving entries.
+func TestCorruptDB_TableIndex(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.build(10000)
+	h.compactMem()
+	h.closeDB()
+	h.corrupt(storage.TypeTable, -1, -2000, 500)
+
+	h.openDB()
+	h.check(5000, 9999)
+
+	h.close()
+}
+
+// TestCorruptDB_MissingManifest removes all manifest files, verifies
+// that a plain open fails, and that Recover rebuilds a usable database
+// (twice, with more writes in between).
+func TestCorruptDB_MissingManifest(t *testing.T) {
+	rnd := rand.New(rand.NewSource(0x0badda7a))
+	h := newDbCorruptHarnessWopt(t, &opt.Options{
+		BlockCacheCapacity: 100,
+		Strict:             opt.StrictJournalChecksum,
+		WriteBuffer:        1000 * 60,
+	})
+
+	h.build(1000)
+	h.compactMem()
+	h.buildShuffled(1000, rnd)
+	h.compactMem()
+	h.deleteRand(500, 1000, rnd)
+	h.compactMem()
+	h.buildShuffled(1000, rnd)
+	h.compactMem()
+	h.deleteRand(500, 1000, rnd)
+	h.compactMem()
+	h.buildShuffled(1000, rnd)
+	h.compactMem()
+	h.closeDB()
+
+	// Opening without a manifest must fail (error is expected, so it
+	// is temporarily ignored by the storage layer).
+	h.stor.SetIgnoreOpenErr(storage.TypeManifest)
+	h.removeAll(storage.TypeManifest)
+	h.openAssert(false)
+	h.stor.SetIgnoreOpenErr(0)
+
+	h.recover()
+	h.check(1000, 1000)
+	h.build(1000)
+	h.compactMem()
+	h.compactRange("", "")
+	h.closeDB()
+
+	h.recover()
+	h.check(1000, 1000)
+
+	h.close()
+}
+
+// TestCorruptDB_SequenceNumberRecovery checks that Recover preserves
+// the latest value and sequence ordering for journal-resident data.
+func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.put("foo", "v1")
+	h.put("foo", "v2")
+	h.put("foo", "v3")
+	h.put("foo", "v4")
+	h.put("foo", "v5")
+	h.closeDB()
+
+	h.recover()
+	h.getVal("foo", "v5")
+	h.put("foo", "v6")
+	h.getVal("foo", "v6")
+
+	h.reopenDB()
+	h.getVal("foo", "v6")
+
+	h.close()
+}
+
+// TestCorruptDB_SequenceNumberRecoveryTable is the same check with the
+// data flushed into tables before recovery.
+func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.put("foo", "v1")
+	h.put("foo", "v2")
+	h.put("foo", "v3")
+	h.compactMem()
+	h.put("foo", "v4")
+	h.put("foo", "v5")
+	h.compactMem()
+	h.closeDB()
+
+	h.recover()
+	h.getVal("foo", "v5")
+	h.put("foo", "v6")
+	h.getVal("foo", "v6")
+
+	h.reopenDB()
+	h.getVal("foo", "v6")
+
+	h.close()
+}
+
+// TestCorruptDB_CorruptedManifest corrupts the manifest, verifies the
+// open fails, and that Recover restores the data.
+func TestCorruptDB_CorruptedManifest(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.put("foo", "hello")
+	h.compactMem()
+	h.compactRange("", "")
+	h.closeDB()
+	h.corrupt(storage.TypeManifest, -1, 0, 1000)
+	h.openAssert(false)
+
+	h.recover()
+	h.getVal("foo", "hello")
+
+	h.close()
+}
+
+// TestCorruptDB_CompactionInputError checks that a corrupted table
+// loses one entry but the database stays writable afterwards.
+func TestCorruptDB_CompactionInputError(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.build(10)
+	h.compactMem()
+	h.closeDB()
+	h.corrupt(storage.TypeTable, -1, 100, 1)
+
+	h.openDB()
+	h.check(9, 9)
+
+	h.build(10000)
+	h.check(10000, 10000)
+
+	h.close()
+}
+
+// TestCorruptDB_UnrelatedKeys checks that corruption in one table does
+// not affect writes and reads of unrelated keys.
+func TestCorruptDB_UnrelatedKeys(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.build(10)
+	h.compactMem()
+	h.closeDB()
+	h.corrupt(storage.TypeTable, -1, 100, 1)
+
+	h.openDB()
+	h.put(string(tkey(1000)), string(tval(1000, ctValSize)))
+	h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
+	h.compactMem()
+	h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
+
+	h.close()
+}
+
+// TestCorruptDB_Level0NewerFileHasOlderSeqnum verifies recovery keeps
+// the newest values when level-0 file order and sequence numbers
+// disagree after a level-1 compaction.
+func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.put("a", "v1")
+	h.put("b", "v1")
+	h.compactMem()
+	h.put("a", "v2")
+	h.put("b", "v2")
+	h.compactMem()
+	h.put("a", "v3")
+	h.put("b", "v3")
+	h.compactMem()
+	h.put("c", "v0")
+	h.put("d", "v0")
+	h.compactMem()
+	h.compactRangeAt(1, "", "")
+	h.closeDB()
+
+	h.recover()
+	h.getVal("a", "v3")
+	h.getVal("b", "v3")
+	h.getVal("c", "v0")
+	h.getVal("d", "v0")
+
+	h.close()
+}
+
+// TestCorruptDB_RecoverInvalidSeq_Issue53 is the level-0 variant of
+// the previous test (regression test for goleveldb issue #53).
+func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.put("a", "v1")
+	h.put("b", "v1")
+	h.compactMem()
+	h.put("a", "v2")
+	h.put("b", "v2")
+	h.compactMem()
+	h.put("a", "v3")
+	h.put("b", "v3")
+	h.compactMem()
+	h.put("c", "v0")
+	h.put("d", "v0")
+	h.compactMem()
+	h.compactRangeAt(0, "", "")
+	h.closeDB()
+
+	h.recover()
+	h.getVal("a", "v3")
+	h.getVal("b", "v3")
+	h.getVal("c", "v0")
+	h.getVal("d", "v0")
+
+	h.close()
+}
+
+// TestCorruptDB_MissingTableFiles verifies that a database with a
+// randomly removed table file refuses to open.
+func TestCorruptDB_MissingTableFiles(t *testing.T) {
+	h := newDbCorruptHarness(t)
+
+	h.put("a", "v1")
+	h.put("b", "v1")
+	h.compactMem()
+	h.put("c", "v2")
+	h.put("d", "v2")
+	h.compactMem()
+	h.put("e", "v3")
+	h.put("f", "v3")
+	h.closeDB()
+
+	h.removeOne(storage.TypeTable)
+	h.openAssert(false)
+
+	h.close()
+}
+
+// TestCorruptDB_RecoverTable corrupts several tables (including a
+// filter block, which must not affect recovery) and verifies Recover
+// preserves the sequence number and most of the entries.
+func TestCorruptDB_RecoverTable(t *testing.T) {
+	h := newDbCorruptHarnessWopt(t, &opt.Options{
+		WriteBuffer:         112 * opt.KiB,
+		CompactionTableSize: 90 * opt.KiB,
+		Filter:              filter.NewBloomFilter(10),
+	})
+
+	h.build(1000)
+	h.compactMem()
+	h.compactRangeAt(0, "", "")
+	h.compactRangeAt(1, "", "")
+	seq := h.db.seq
+	h.closeDB()
+	h.corrupt(storage.TypeTable, 0, 1000, 1)
+	h.corrupt(storage.TypeTable, 3, 10000, 1)
+	// Corrupted filter shouldn't affect recovery.
+	h.corrupt(storage.TypeTable, 3, 113888, 10)
+	h.corrupt(storage.TypeTable, -1, 20000, 1)
+
+	h.recover()
+	if h.db.seq != seq {
+		t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
+	}
+	h.check(985, 985)
+
+	h.close()
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
new file mode 100644
index 0000000..900996a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
@@ -0,0 +1,1070 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"container/list"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type DB struct { // DB is a LevelDB database; per Open's contract the returned instance is goroutine-safe.
+	// Need 64-bit alignment: seq is kept first so 64-bit atomic operations on it stay 8-byte aligned on 32-bit platforms (presumably accessed via sync/atomic elsewhere — confirm).
+	seq uint64
+
+	// Session.
+	s *session
+
+	// MemDB.
+	memMu             sync.RWMutex
+	memPool           chan *memdb.DB
+	mem, frozenMem    *memDB
+	journal           *journal.Writer
+	journalWriter     storage.Writer
+	journalFile       storage.File
+	frozenJournalFile storage.File
+	frozenSeq         uint64
+
+	// Snapshot.
+	snapsMu   sync.Mutex
+	snapsList *list.List
+
+	// Stats.
+	aliveSnaps, aliveIters int32
+
+	// Write.
+	writeC       chan *Batch
+	writeMergedC chan bool
+	writeLockC   chan struct{} // capacity 1; acquired as the write lock (see Close)
+	writeAckC    chan error
+	writeDelay   time.Duration
+	writeDelayN  int
+	journalC     chan *Batch
+	journalAckC  chan error
+
+	// Compaction.
+	tcompCmdC        chan cCmd
+	tcompPauseC      chan chan<- struct{}
+	mcompCmdC        chan cCmd
+	compErrC         chan error
+	compPerErrC      chan error
+	compErrSetC      chan error
+	compWriteLocking bool
+	compStats        []cStats
+
+	// Close.
+	closeW sync.WaitGroup
+	closeC chan struct{}
+	closed uint32 // non-zero once Close has started (set via setClosed)
+	closer io.Closer
+}
+
+func openDB(s *session) (*DB, error) { // openDB builds a DB on an already-recovered session and starts its background goroutines.
+	s.log("db@open opening")
+	start := time.Now()
+	db := &DB{
+		s: s,
+		// Initial sequence
+		seq: s.stSeqNum,
+		// MemDB
+		memPool: make(chan *memdb.DB, 1),
+		// Snapshot
+		snapsList: list.New(),
+		// Write
+		writeC:       make(chan *Batch),
+		writeMergedC: make(chan bool),
+		writeLockC:   make(chan struct{}, 1),
+		writeAckC:    make(chan error),
+		journalC:     make(chan *Batch),
+		journalAckC:  make(chan error),
+		// Compaction
+		tcompCmdC:   make(chan cCmd),
+		tcompPauseC: make(chan chan<- struct{}),
+		mcompCmdC:   make(chan cCmd),
+		compErrC:    make(chan error),
+		compPerErrC: make(chan error),
+		compErrSetC: make(chan error),
+		compStats:   make([]cStats, s.o.GetNumLevel()),
+		// Close
+		closeC: make(chan struct{}),
+	}
+
+	// Read-only mode.
+	readOnly := s.o.GetReadOnly()
+
+	if readOnly {
+		// Recover journals (read-only mode).
+		if err := db.recoverJournalRO(); err != nil {
+			return nil, err
+		}
+	} else {
+		// Recover journals.
+		if err := db.recoverJournal(); err != nil {
+			return nil, err
+		}
+
+		// Remove any obsolete files.
+		if err := db.checkAndCleanFiles(); err != nil {
+			// Close journal.
+			if db.journal != nil {
+				db.journal.Close()
+				db.journalWriter.Close()
+			}
+			return nil, err
+		}
+
+	}
+
+	// Doesn't need to be included in the wait group.
+	go db.compactionError()
+	go db.mpoolDrain()
+
+	if readOnly {
+		db.SetReadOnly()
+	} else {
+		db.closeW.Add(3) // matches the three goroutines started below
+		go db.tCompaction()
+		go db.mCompaction()
+		go db.jWriter()
+	}
+
+	s.logf("db@open done T·%v", time.Since(start))
+
+	runtime.SetFinalizer(db, (*DB).Close) // safety net: close the DB if the caller drops it without calling Close
+	return db, nil
+}
+
+// Open opens or creates a DB for the given storage.
+// The DB will be created if it does not exist, unless ErrorIfMissing is true.
+// Also, if ErrorIfExist is true and the DB exists, Open will return an
+// os.ErrExist error.
+//
+// Open will return an error with type of ErrCorrupted if corruption
+// detected in the DB. Corrupted DB can be recovered with Recover
+// function.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling Close method.
+func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+	s, err := newSession(stor, o)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err != nil {
+			s.close()
+			s.release()
+		}
+	}()
+
+	err = s.recover()
+	if err != nil {
+		if !os.IsNotExist(err) || s.o.GetErrorIfMissing() {
+			return
+		}
+		err = s.create()
+		if err != nil {
+			return
+		}
+	} else if s.o.GetErrorIfExist() {
+		err = os.ErrExist
+		return
+	}
+
+	return openDB(s)
+}
+
+// OpenFile opens or creates a DB for the given path.
+// The DB will be created if it does not exist, unless ErrorIfMissing is true.
+// Also, if ErrorIfExist is true and the DB exists, OpenFile will return an
+// os.ErrExist error.
+//
+// OpenFile uses standard file-system backed storage implementation as
+// described in the leveldb/storage package.
+//
+// OpenFile will return an error with type of ErrCorrupted if corruption
+// detected in the DB. Corrupted DB can be recovered with Recover
+// function.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling Close method.
+func OpenFile(path string, o *opt.Options) (db *DB, err error) {
+	stor, err := storage.OpenFile(path)
+	if err != nil {
+		return
+	}
+	db, err = Open(stor, o)
+	if err != nil {
+		stor.Close()
+	} else {
+		db.closer = stor
+	}
+	return
+}
+
+// Recover recovers and opens a DB with missing or corrupted manifest files
+// for the given storage. It will ignore any manifest files, valid or not.
+// The DB must already exist or it will return an error.
+// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling Close method.
+func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+	s, err := newSession(stor, o)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err != nil {
+			s.close()
+			s.release()
+		}
+	}()
+
+	err = recoverTable(s, o)
+	if err != nil {
+		return
+	}
+	return openDB(s)
+}
+
+// RecoverFile recovers and opens a DB with missing or corrupted manifest files
+// for the given path. It will ignore any manifest files, valid or not.
+// The DB must already exist or it will return an error.
+// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
+//
+// RecoverFile uses standard file-system backed storage implementation as described
+// in the leveldb/storage package.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling Close method.
+func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
+	stor, err := storage.OpenFile(path)
+	if err != nil {
+		return
+	}
+	db, err = Recover(stor, o)
+	if err != nil {
+		stor.Close()
+	} else {
+		db.closer = stor
+	}
+	return
+}
+
+func recoverTable(s *session, o *opt.Options) error { // recoverTable rebuilds the manifest by scanning (and if needed rewriting) every table file.
+	o = dupOptions(o)
+	// Mask StrictReader, let StrictRecovery do its job.
+	o.Strict &= ^opt.StrictReader
+
+	// Get all tables and sort them by file number.
+	tableFiles_, err := s.getFiles(storage.TypeTable)
+	if err != nil {
+		return err
+	}
+	tableFiles := files(tableFiles_)
+	tableFiles.sort()
+
+	var (
+		maxSeq                                                            uint64
+		recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int
+
+		// We will drop corrupted table.
+		strict = o.GetStrict(opt.StrictRecovery)
+		noSync = o.GetNoSync()
+
+		rec   = &sessionRecord{}
+		bpool = util.NewBufferPool(o.GetBlockSize() + 5)
+	)
+	buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { // copies iter's valid entries into a fresh temp table file
+		tmp = s.newTemp()
+		writer, err := tmp.Create()
+		if err != nil {
+			return
+		}
+		defer func() {
+			writer.Close()
+			if err != nil {
+				tmp.Remove() // discard the partially-written temp table on failure
+				tmp = nil
+			}
+		}()
+
+		// Copy entries.
+		tw := table.NewWriter(writer, o)
+		for iter.Next() {
+			key := iter.Key()
+			if validIkey(key) {
+				err = tw.Append(key, iter.Value())
+				if err != nil {
+					return
+				}
+			}
+		}
+		err = iter.Error()
+		if err != nil {
+			return
+		}
+		err = tw.Close()
+		if err != nil {
+			return
+		}
+		if !noSync {
+			err = writer.Sync()
+			if err != nil {
+				return
+			}
+		}
+		size = int64(tw.BytesLen())
+		return
+	}
+	recoverTable := func(file storage.File) error { // recovers a single table file, rebuilding it if it holds corruption
+		s.logf("table@recovery recovering @%d", file.Num())
+		reader, err := file.Open()
+		if err != nil {
+			return err
+		}
+		var closed bool
+		defer func() {
+			if !closed {
+				reader.Close()
+			}
+		}()
+
+		// Get file size.
+		size, err := reader.Seek(0, 2) // whence=2: offset relative to end, so the result is the file size
+		if err != nil {
+			return err
+		}
+
+		var (
+			tSeq                                     uint64
+			tgoodKey, tcorruptedKey, tcorruptedBlock int
+			imin, imax                               []byte
+		)
+		tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
+		if err != nil {
+			return err
+		}
+		iter := tr.NewIterator(nil, nil)
+		if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
+			itererr.SetErrorCallback(func(err error) {
+				if errors.IsCorrupted(err) {
+					s.logf("table@recovery block corruption @%d %q", file.Num(), err)
+					tcorruptedBlock++
+				}
+			})
+		}
+
+		// Scan the table.
+		for iter.Next() {
+			key := iter.Key()
+			_, seq, _, kerr := parseIkey(key)
+			if kerr != nil {
+				tcorruptedKey++
+				continue
+			}
+			tgoodKey++
+			if seq > tSeq {
+				tSeq = seq
+			}
+			if imin == nil {
+				imin = append([]byte{}, key...)
+			}
+			imax = append(imax[:0], key...)
+		}
+		if err := iter.Error(); err != nil {
+			iter.Release()
+			return err
+		}
+		iter.Release()
+
+		goodKey += tgoodKey
+		corruptedKey += tcorruptedKey
+		corruptedBlock += tcorruptedBlock
+
+		if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
+			droppedTable++
+			s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+			return nil
+		}
+
+		if tgoodKey > 0 {
+			if tcorruptedKey > 0 || tcorruptedBlock > 0 {
+				// Rebuild the table.
+				s.logf("table@recovery rebuilding @%d", file.Num())
+				iter := tr.NewIterator(nil, nil)
+				tmp, newSize, err := buildTable(iter)
+				iter.Release()
+				if err != nil {
+					return err
+				}
+				closed = true
+				reader.Close()
+				if err := file.Replace(tmp); err != nil {
+					return err
+				}
+				size = newSize
+			}
+			if tSeq > maxSeq {
+				maxSeq = tSeq
+			}
+			recoveredKey += tgoodKey
+			// Add table to level 0.
+			rec.addTable(0, file.Num(), uint64(size), imin, imax)
+			s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+		} else {
+			droppedTable++
+			s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
+		}
+
+		return nil
+	}
+
+	// Recover all tables.
+	if len(tableFiles) > 0 {
+		s.logf("table@recovery F·%d", len(tableFiles))
+
+		// Mark file number as used.
+		s.markFileNum(tableFiles[len(tableFiles)-1].Num())
+
+		for _, file := range tableFiles {
+			if err := recoverTable(file); err != nil {
+				return err
+			}
+		}
+
+		s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
+	}
+
+	// Set sequence number.
+	rec.setSeqNum(maxSeq)
+
+	// Create new manifest.
+	if err := s.create(); err != nil {
+		return err
+	}
+
+	// Commit.
+	return s.commit(rec)
+}
+
+func (db *DB) recoverJournal() error { // recoverJournal replays unflushed journal files into tables and commits the result.
+	// Get all journals and sort them by file number.
+	allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
+	if err != nil {
+		return err
+	}
+	files(allJournalFiles).sort()
+
+	// Journals that will be recovered.
+	var recJournalFiles []storage.File
+	for _, jf := range allJournalFiles {
+		if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
+			recJournalFiles = append(recJournalFiles, jf)
+		}
+	}
+
+	var (
+		of  storage.File // Obsolete file.
+		rec = &sessionRecord{}
+	)
+
+	// Recover journals.
+	if len(recJournalFiles) > 0 {
+		db.logf("journal@recovery F·%d", len(recJournalFiles))
+
+		// Mark file number as used.
+		db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())
+
+		var (
+			// Options.
+			strict      = db.s.o.GetStrict(opt.StrictJournal)
+			checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
+			writeBuffer = db.s.o.GetWriteBuffer()
+
+			jr    *journal.Reader
+			mdb   = memdb.New(db.s.icmp, writeBuffer)
+			buf   = &util.Buffer{}
+			batch = &Batch{}
+		)
+
+		for _, jf := range recJournalFiles {
+			db.logf("journal@recovery recovering @%d", jf.Num())
+
+			fr, err := jf.Open()
+			if err != nil {
+				return err
+			}
+
+			// Create or reset journal reader instance.
+			if jr == nil {
+				jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
+			} else {
+				jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
+			}
+
+			// Flush memdb and remove obsolete journal file.
+			if of != nil {
+				if mdb.Len() > 0 {
+					if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
+						fr.Close()
+						return err
+					}
+				}
+
+				rec.setJournalNum(jf.Num())
+				rec.setSeqNum(db.seq)
+				if err := db.s.commit(rec); err != nil {
+					fr.Close()
+					return err
+				}
+				rec.resetAddedTables()
+
+				of.Remove() // the previous journal is durable in tables now; drop it
+				of = nil
+			}
+
+			// Replay journal to memdb.
+			mdb.Reset()
+			for {
+				r, err := jr.Next()
+				if err != nil {
+					if err == io.EOF {
+						break
+					}
+
+					fr.Close()
+					return errors.SetFile(err, jf)
+				}
+
+				buf.Reset()
+				if _, err := buf.ReadFrom(r); err != nil {
+					if err == io.ErrUnexpectedEOF {
+						// This is error returned due to corruption, with strict == false.
+						continue
+					}
+
+					fr.Close()
+					return errors.SetFile(err, jf)
+				}
+				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
+					if !strict && errors.IsCorrupted(err) {
+						db.s.logf("journal error: %v (skipped)", err)
+						// We won't apply sequence number as it might be corrupted.
+						continue
+					}
+
+					fr.Close()
+					return errors.SetFile(err, jf)
+				}
+
+				// Save sequence number.
+				db.seq = batch.seq + uint64(batch.Len())
+
+				// Flush it if large enough.
+				if mdb.Size() >= writeBuffer {
+					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+						fr.Close()
+						return err
+					}
+
+					mdb.Reset()
+				}
+			}
+
+			fr.Close()
+			of = jf // this journal becomes obsolete once the next one is replayed
+		}
+
+		// Flush the last memdb.
+		if mdb.Len() > 0 {
+			if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Create a new journal.
+	if _, err := db.newMem(0); err != nil {
+		return err
+	}
+
+	// Commit.
+	rec.setJournalNum(db.journalFile.Num())
+	rec.setSeqNum(db.seq)
+	if err := db.s.commit(rec); err != nil {
+		// Close journal on error.
+		if db.journal != nil {
+			db.journal.Close()
+			db.journalWriter.Close()
+		}
+		return err
+	}
+
+	// Remove the last obsolete journal file.
+	if of != nil {
+		of.Remove()
+	}
+
+	return nil
+}
+
+func (db *DB) recoverJournalRO() error { // recoverJournalRO replays journals into an in-memory memdb only; nothing is written to storage.
+	// Get all journals and sort them by file number.
+	allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
+	if err != nil {
+		return err
+	}
+	files(allJournalFiles).sort()
+
+	// Journals that will be recovered.
+	var recJournalFiles []storage.File
+	for _, jf := range allJournalFiles {
+		if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
+			recJournalFiles = append(recJournalFiles, jf)
+		}
+	}
+
+	var (
+		// Options.
+		strict      = db.s.o.GetStrict(opt.StrictJournal)
+		checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
+		writeBuffer = db.s.o.GetWriteBuffer()
+
+		mdb = memdb.New(db.s.icmp, writeBuffer)
+	)
+
+	// Recover journals.
+	if len(recJournalFiles) > 0 {
+		db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))
+
+		var (
+			jr    *journal.Reader
+			buf   = &util.Buffer{}
+			batch = &Batch{}
+		)
+
+		for _, jf := range recJournalFiles {
+			db.logf("journal@recovery recovering @%d", jf.Num())
+
+			fr, err := jf.Open()
+			if err != nil {
+				return err
+			}
+
+			// Create or reset journal reader instance.
+			if jr == nil {
+				jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
+			} else {
+				jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
+			}
+
+			// Replay journal to memdb.
+			for {
+				r, err := jr.Next()
+				if err != nil {
+					if err == io.EOF {
+						break
+					}
+
+					fr.Close()
+					return errors.SetFile(err, jf)
+				}
+
+				buf.Reset()
+				if _, err := buf.ReadFrom(r); err != nil {
+					if err == io.ErrUnexpectedEOF {
+						// This is error returned due to corruption, with strict == false.
+						continue
+					}
+
+					fr.Close()
+					return errors.SetFile(err, jf)
+				}
+				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
+					if !strict && errors.IsCorrupted(err) {
+						db.s.logf("journal error: %v (skipped)", err)
+						// We won't apply sequence number as it might be corrupted.
+						continue
+					}
+
+					fr.Close()
+					return errors.SetFile(err, jf)
+				}
+
+				// Save sequence number.
+				db.seq = batch.seq + uint64(batch.Len())
+			}
+
+			fr.Close()
+		}
+	}
+
+	// Set memDB.
+	db.mem = &memDB{db: db, DB: mdb, ref: 1} // ref=1: the DB itself holds the only reference
+
+	return nil
+}
+
+func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { // get looks key up at sequence seq: memdbs first, then the current version's tables.
+	ikey := newIkey(key, seq, ktSeek)
+
+	em, fm := db.getMems()
+	for _, m := range [...]*memDB{em, fm} {
+		if m == nil {
+			continue
+		}
+		defer m.decref() // bounded: at most two memdbs, released when get returns
+
+		mk, mv, me := m.Find(ikey)
+		if me == nil {
+			ukey, _, kt, kerr := parseIkey(mk)
+			if kerr != nil {
+				// Shouldn't have happened.
+				panic(kerr)
+			}
+			if db.s.icmp.uCompare(ukey, key) == 0 {
+				if kt == ktDel {
+					return nil, ErrNotFound
+				}
+				return append([]byte{}, mv...), nil // copy out of the memdb
+			}
+		} else if me != ErrNotFound {
+			return nil, me
+		}
+	}
+
+	v := db.s.version()
+	value, cSched, err := v.get(ikey, ro, false)
+	v.release()
+	if cSched {
+		// Trigger table compaction.
+		db.compSendTrigger(db.tcompCmdC)
+	}
+	return
+}
+
+func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { // has mirrors get but only reports existence, never fetching the value.
+	ikey := newIkey(key, seq, ktSeek)
+
+	em, fm := db.getMems()
+	for _, m := range [...]*memDB{em, fm} {
+		if m == nil {
+			continue
+		}
+		defer m.decref() // bounded: at most two memdbs, released when has returns
+
+		mk, _, me := m.Find(ikey)
+		if me == nil {
+			ukey, _, kt, kerr := parseIkey(mk)
+			if kerr != nil {
+				// Shouldn't have happened.
+				panic(kerr)
+			}
+			if db.s.icmp.uCompare(ukey, key) == 0 {
+				if kt == ktDel {
+					return false, nil
+				}
+				return true, nil
+			}
+		} else if me != ErrNotFound {
+			return false, me
+		}
+	}
+
+	v := db.s.version()
+	_, cSched, err := v.get(ikey, ro, true)
+	v.release()
+	if cSched {
+		// Trigger table compaction.
+		db.compSendTrigger(db.tcompCmdC)
+	}
+	if err == nil {
+		ret = true
+	} else if err == ErrNotFound {
+		err = nil // not-found is a normal outcome for Has, not an error
+	}
+	return
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The returned slice is its own copy, it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = db.ok()
+	if err != nil {
+		return
+	}
+
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	return db.get(key, se.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = db.ok()
+	if err != nil {
+		return
+	}
+
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	return db.has(key, se.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the
+// underlying DB.
+// The returned iterator is not goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := db.ok(); err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	// Iterator holds 'version' lock, 'version' is immutable so snapshot
+	// can be released after iterator created.
+	return db.newIterator(se.seq, slice, ro)
+}
+
+// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
+// is a frozen snapshot of a DB state at a particular point in time. The
+// content of the snapshot is guaranteed to be consistent.
+//
+// The snapshot must be released after use, by calling Release method.
+func (db *DB) GetSnapshot() (*Snapshot, error) {
+	if err := db.ok(); err != nil {
+		return nil, err
+	}
+
+	return db.newSnapshot(), nil
+}
+
+// GetProperty returns the value of the given property name, or
+// ErrNotFound for an unknown property.
+//
+// Property names:
+//	leveldb.num-files-at-level{n}
+//		Returns the number of files at level 'n'.
+//	leveldb.stats
+//		Returns statistics of the underlying DB.
+//	leveldb.sstables
+//		Returns sstables list for each level.
+//	leveldb.blockpool
+//		Returns block pool stats.
+//	leveldb.cachedblock
+//		Returns size of cached block.
+//	leveldb.openedtables
+//		Returns number of opened tables.
+//	leveldb.alivesnaps
+//		Returns number of alive snapshots.
+//	leveldb.aliveiters
+//		Returns number of alive iterators.
+func (db *DB) GetProperty(name string) (value string, err error) {
+	err = db.ok()
+	if err != nil {
+		return
+	}
+
+	const prefix = "leveldb."
+	if !strings.HasPrefix(name, prefix) {
+		return "", ErrNotFound
+	}
+	p := name[len(prefix):]
+
+	v := db.s.version()
+	defer v.release()
+
+	numFilesPrefix := "num-files-at-level"
+	switch {
+	case strings.HasPrefix(p, numFilesPrefix):
+		var level uint
+		var rest string
+		n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
+		if n != 1 || int(level) >= db.s.o.GetNumLevel() { // n must be exactly 1: a level number with no trailing characters
+			err = ErrNotFound
+		} else {
+			value = fmt.Sprint(v.tLen(int(level)))
+		}
+	case p == "stats":
+		value = "Compactions\n" +
+			" Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)\n" +
+			"-------+------------+---------------+---------------+---------------+---------------\n"
+		for level, tables := range v.tables {
+			duration, read, write := db.compStats[level].get()
+			if len(tables) == 0 && duration == 0 {
+				continue
+			}
+			value += fmt.Sprintf(" %3d   | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
+				level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
+				float64(read)/1048576.0, float64(write)/1048576.0)
+		}
+	case p == "sstables":
+		for level, tables := range v.tables {
+			value += fmt.Sprintf("--- level %d ---\n", level)
+			for _, t := range tables {
+				value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
+			}
+		}
+	case p == "blockpool":
+		value = fmt.Sprintf("%v", db.s.tops.bpool)
+	case p == "cachedblock":
+		if db.s.tops.bcache != nil {
+			value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
+		} else {
+			value = "<nil>"
+		}
+	case p == "openedtables":
+		value = fmt.Sprintf("%d", db.s.tops.cache.Size())
+	case p == "alivesnaps":
+		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
+	case p == "aliveiters":
+		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
+	default:
+		err = ErrNotFound
+	}
+
+	return
+}
+
+// SizeOf calculates approximate sizes of the given key ranges.
+// The length of the returned sizes is equal to the length of the given
+// ranges. The returned sizes measure storage space usage, so if the user
+// data compresses by a factor of ten, the returned sizes will be one-tenth
+// the size of the corresponding user data size.
+// The results may not include the sizes of recently written data.
+func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
+	if err := db.ok(); err != nil {
+		return nil, err
+	}
+
+	v := db.s.version()
+	defer v.release()
+
+	sizes := make(Sizes, 0, len(ranges))
+	for _, r := range ranges {
+		imin := newIkey(r.Start, kMaxSeq, ktSeek)
+		imax := newIkey(r.Limit, kMaxSeq, ktSeek)
+		start, err := v.offsetOf(imin)
+		if err != nil {
+			return nil, err
+		}
+		limit, err := v.offsetOf(imax)
+		if err != nil {
+			return nil, err
+		}
+		var size uint64
+		if limit >= start { // guard against an inverted range yielding underflow
+			size = limit - start
+		}
+		sizes = append(sizes, size)
+	}
+
+	return sizes, nil
+}
+
+// Close closes the DB. This will also release any outstanding snapshots and
+// abort any in-flight compaction.
+//
+// It is not safe to close a DB until all outstanding iterators are released.
+// It is valid to call Close multiple times. Other methods should not be
+// called after the DB has been closed.
+func (db *DB) Close() error {
+	if !db.setClosed() {
+		return ErrClosed
+	}
+
+	start := time.Now()
+	db.log("db@close closing")
+
+	// Clear the finalizer.
+	runtime.SetFinalizer(db, nil)
+
+	// Get compaction error.
+	var err error
+	select {
+	case err = <-db.compErrC:
+		if err == ErrReadOnly {
+			err = nil // read-only mode is expected, not a close failure
+		}
+	default:
+	}
+
+	// Signal all goroutines.
+	close(db.closeC)
+
+	// Wait for all goroutines to exit.
+	db.closeW.Wait()
+
+	// Lock writer and close the journal.
+	db.writeLockC <- struct{}{}
+	if db.journal != nil {
+		db.journal.Close()
+		db.journalWriter.Close()
+	}
+
+	if db.writeDelayN > 0 {
+		db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+	}
+
+	// Close session.
+	db.s.close()
+	db.logf("db@close done T·%v", time.Since(start))
+	db.s.release()
+
+	if db.closer != nil {
+		if err1 := db.closer.Close(); err == nil {
+			err = err1 // report the closer error only if nothing failed earlier
+		}
+	}
+
+	// NIL'ing pointers, so use-after-close fails fast and the GC can reclaim.
+	db.s = nil
+	db.mem = nil
+	db.frozenMem = nil
+	db.journal = nil
+	db.journalWriter = nil
+	db.journalFile = nil
+	db.frozenJournalFile = nil
+	db.closer = nil
+
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
new file mode 100644
index 0000000..76401c9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -0,0 +1,791 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"sync"
+	"time"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+var (
+	errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
+)
+
+type cStats struct {
+	sync.Mutex
+	duration time.Duration
+	read     uint64
+	write    uint64
+}
+
+func (p *cStats) add(n *cStatsStaging) {
+	p.Lock()
+	p.duration += n.duration
+	p.read += n.read
+	p.write += n.write
+	p.Unlock()
+}
+
+func (p *cStats) get() (duration time.Duration, read, write uint64) {
+	p.Lock()
+	defer p.Unlock()
+	return p.duration, p.read, p.write
+}
+
+type cStatsStaging struct {
+	start    time.Time
+	duration time.Duration
+	on       bool
+	read     uint64
+	write    uint64
+}
+
+func (p *cStatsStaging) startTimer() {
+	if !p.on {
+		p.start = time.Now()
+		p.on = true
+	}
+}
+
+func (p *cStatsStaging) stopTimer() {
+	if p.on {
+		p.duration += time.Since(p.start)
+		p.on = false
+	}
+}
+
+func (db *DB) compactionError() {
+	var err error
+noerr:
+	// No error.
+	for {
+		select {
+		case err = <-db.compErrSetC:
+			switch {
+			case err == nil:
+			case err == ErrReadOnly, errors.IsCorrupted(err):
+				goto hasperr
+			default:
+				goto haserr
+			}
+		case _, _ = <-db.closeC:
+			return
+		}
+	}
+haserr:
+	// Transient error.
+	for {
+		select {
+		case db.compErrC <- err:
+		case err = <-db.compErrSetC:
+			switch {
+			case err == nil:
+				goto noerr
+			case err == ErrReadOnly, errors.IsCorrupted(err):
+				goto hasperr
+			default:
+			}
+		case _, _ = <-db.closeC:
+			return
+		}
+	}
+hasperr:
+	// Persistent error.
+	for {
+		select {
+		case db.compErrC <- err:
+		case db.compPerErrC <- err:
+		case db.writeLockC <- struct{}{}:
+			// Hold write lock, so that write won't pass-through.
+			db.compWriteLocking = true
+		case _, _ = <-db.closeC:
+			if db.compWriteLocking {
+				// We should release the lock or Close will hang.
+				<-db.writeLockC
+			}
+			return
+		}
+	}
+}
+
+type compactionTransactCounter int
+
+func (cnt *compactionTransactCounter) incr() {
+	*cnt++
+}
+
+type compactionTransactInterface interface {
+	run(cnt *compactionTransactCounter) error
+	revert() error
+}
+
+func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
+	defer func() {
+		if x := recover(); x != nil {
+			if x == errCompactionTransactExiting {
+				if err := t.revert(); err != nil {
+					db.logf("%s revert error %q", name, err)
+				}
+			}
+			panic(x)
+		}
+	}()
+
+	const (
+		backoffMin = 1 * time.Second
+		backoffMax = 8 * time.Second
+		backoffMul = 2 * time.Second
+	)
+	var (
+		backoff  = backoffMin
+		backoffT = time.NewTimer(backoff)
+		lastCnt  = compactionTransactCounter(0)
+
+		disableBackoff = db.s.o.GetDisableCompactionBackoff()
+	)
+	for n := 0; ; n++ {
+		// Check whether the DB is closed.
+		if db.isClosed() {
+			db.logf("%s exiting", name)
+			db.compactionExitTransact()
+		} else if n > 0 {
+			db.logf("%s retrying N·%d", name, n)
+		}
+
+		// Execute.
+		cnt := compactionTransactCounter(0)
+		err := t.run(&cnt)
+		if err != nil {
+			db.logf("%s error I·%d %q", name, cnt, err)
+		}
+
+		// Set compaction error status.
+		select {
+		case db.compErrSetC <- err:
+		case perr := <-db.compPerErrC:
+			if err != nil {
+				db.logf("%s exiting (persistent error %q)", name, perr)
+				db.compactionExitTransact()
+			}
+		case _, _ = <-db.closeC:
+			db.logf("%s exiting", name)
+			db.compactionExitTransact()
+		}
+		if err == nil {
+			return
+		}
+		if errors.IsCorrupted(err) {
+			db.logf("%s exiting (corruption detected)", name)
+			db.compactionExitTransact()
+		}
+
+		if !disableBackoff {
+			// Reset backoff duration if counter is advancing.
+			if cnt > lastCnt {
+				backoff = backoffMin
+				lastCnt = cnt
+			}
+
+			// Backoff.
+			backoffT.Reset(backoff)
+			if backoff < backoffMax {
+				backoff *= backoffMul
+				if backoff > backoffMax {
+					backoff = backoffMax
+				}
+			}
+			select {
+			case <-backoffT.C:
+			case _, _ = <-db.closeC:
+				db.logf("%s exiting", name)
+				db.compactionExitTransact()
+			}
+		}
+	}
+}
+
+type compactionTransactFunc struct {
+	runFunc    func(cnt *compactionTransactCounter) error
+	revertFunc func() error
+}
+
+func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
+	return t.runFunc(cnt)
+}
+
+func (t *compactionTransactFunc) revert() error {
+	if t.revertFunc != nil {
+		return t.revertFunc()
+	}
+	return nil
+}
+
+func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
+	db.compactionTransact(name, &compactionTransactFunc{run, revert})
+}
+
+func (db *DB) compactionExitTransact() {
+	panic(errCompactionTransactExiting)
+}
+
+func (db *DB) memCompaction() {
+	mdb := db.getFrozenMem()
+	if mdb == nil {
+		return
+	}
+	defer mdb.decref()
+
+	db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
+
+	// Don't compact empty memdb.
+	if mdb.Len() == 0 {
+		db.logf("memdb@flush skipping")
+		// drop frozen memdb
+		db.dropFrozenMem()
+		return
+	}
+
+	// Pause table compaction.
+	resumeC := make(chan struct{})
+	select {
+	case db.tcompPauseC <- (chan<- struct{})(resumeC):
+	case <-db.compPerErrC:
+		close(resumeC)
+		resumeC = nil
+	case _, _ = <-db.closeC:
+		return
+	}
+
+	var (
+		rec        = &sessionRecord{}
+		stats      = &cStatsStaging{}
+		flushLevel int
+	)
+
+	db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
+		stats.startTimer()
+		flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1)
+		stats.stopTimer()
+		return
+	}, func() error {
+		for _, r := range rec.addedTables {
+			db.logf("memdb@flush revert @%d", r.num)
+			f := db.s.getTableFile(r.num)
+			if err := f.Remove(); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+
+	db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) {
+		stats.startTimer()
+		rec.setJournalNum(db.journalFile.Num())
+		rec.setSeqNum(db.frozenSeq)
+		err = db.s.commit(rec)
+		stats.stopTimer()
+		return
+	}, nil)
+
+	db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
+
+	for _, r := range rec.addedTables {
+		stats.write += r.size
+	}
+	db.compStats[flushLevel].add(stats)
+
+	// Drop frozen memdb.
+	db.dropFrozenMem()
+
+	// Resume table compaction.
+	if resumeC != nil {
+		select {
+		case <-resumeC:
+			close(resumeC)
+		case _, _ = <-db.closeC:
+			return
+		}
+	}
+
+	// Trigger table compaction.
+	db.compSendTrigger(db.tcompCmdC)
+}
+
+type tableCompactionBuilder struct {
+	db           *DB
+	s            *session
+	c            *compaction
+	rec          *sessionRecord
+	stat0, stat1 *cStatsStaging
+
+	snapHasLastUkey bool
+	snapLastUkey    []byte
+	snapLastSeq     uint64
+	snapIter        int
+	snapKerrCnt     int
+	snapDropCnt     int
+
+	kerrCnt int
+	dropCnt int
+
+	minSeq    uint64
+	strict    bool
+	tableSize int
+
+	tw *tWriter
+}
+
+func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
+	// Create new table if not already.
+	if b.tw == nil {
+		// Check for pause event.
+		if b.db != nil {
+			select {
+			case ch := <-b.db.tcompPauseC:
+				b.db.pauseCompaction(ch)
+			case _, _ = <-b.db.closeC:
+				b.db.compactionExitTransact()
+			default:
+			}
+		}
+
+		// Create new table.
+		var err error
+		b.tw, err = b.s.tops.create()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Write key/value into table.
+	return b.tw.append(key, value)
+}
+
+func (b *tableCompactionBuilder) needFlush() bool {
+	return b.tw.tw.BytesLen() >= b.tableSize
+}
+
+func (b *tableCompactionBuilder) flush() error {
+	t, err := b.tw.finish()
+	if err != nil {
+		return err
+	}
+	b.rec.addTableFile(b.c.level+1, t)
+	b.stat1.write += t.size
+	b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
+	b.tw = nil
+	return nil
+}
+
+func (b *tableCompactionBuilder) cleanup() {
+	if b.tw != nil {
+		b.tw.drop()
+		b.tw = nil
+	}
+}
+
+func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
+	snapResumed := b.snapIter > 0
+	hasLastUkey := b.snapHasLastUkey // The key might have zero length, so this is necessary.
+	lastUkey := append([]byte{}, b.snapLastUkey...)
+	lastSeq := b.snapLastSeq
+	b.kerrCnt = b.snapKerrCnt
+	b.dropCnt = b.snapDropCnt
+	// Restore compaction state.
+	b.c.restore()
+
+	defer b.cleanup()
+
+	b.stat1.startTimer()
+	defer b.stat1.stopTimer()
+
+	iter := b.c.newIterator()
+	defer iter.Release()
+	for i := 0; iter.Next(); i++ {
+		// Incr transact counter.
+		cnt.incr()
+
+		// Skip until last state.
+		if i < b.snapIter {
+			continue
+		}
+
+		resumed := false
+		if snapResumed {
+			resumed = true
+			snapResumed = false
+		}
+
+		ikey := iter.Key()
+		ukey, seq, kt, kerr := parseIkey(ikey)
+
+		if kerr == nil {
+			shouldStop := !resumed && b.c.shouldStopBefore(ikey)
+
+			if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
+				// First occurrence of this user key.
+
+				// Only rotate tables if ukey doesn't hop across.
+				if b.tw != nil && (shouldStop || b.needFlush()) {
+					if err := b.flush(); err != nil {
+						return err
+					}
+
+					// Creates snapshot of the state.
+					b.c.save()
+					b.snapHasLastUkey = hasLastUkey
+					b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
+					b.snapLastSeq = lastSeq
+					b.snapIter = i
+					b.snapKerrCnt = b.kerrCnt
+					b.snapDropCnt = b.dropCnt
+				}
+
+				hasLastUkey = true
+				lastUkey = append(lastUkey[:0], ukey...)
+				lastSeq = kMaxSeq
+			}
+
+			switch {
+			case lastSeq <= b.minSeq:
+				// Dropped because newer entry for same user key exist
+				fallthrough // (A)
+			case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
+				// For this user key:
+				// (1) there is no data in higher levels
+				// (2) data in lower levels will have larger seq numbers
+				// (3) data in layers that are being compacted here and have
+				//     smaller seq numbers will be dropped in the next
+				//     few iterations of this loop (by rule (A) above).
+				// Therefore this deletion marker is obsolete and can be dropped.
+				lastSeq = seq
+				b.dropCnt++
+				continue
+			default:
+				lastSeq = seq
+			}
+		} else {
+			if b.strict {
+				return kerr
+			}
+
+			// Don't drop corrupted keys.
+			hasLastUkey = false
+			lastUkey = lastUkey[:0]
+			lastSeq = kMaxSeq
+			b.kerrCnt++
+		}
+
+		if err := b.appendKV(ikey, iter.Value()); err != nil {
+			return err
+		}
+	}
+
+	if err := iter.Error(); err != nil {
+		return err
+	}
+
+	// Finish last table.
+	if b.tw != nil && !b.tw.empty() {
+		return b.flush()
+	}
+	return nil
+}
+
+func (b *tableCompactionBuilder) revert() error {
+	for _, at := range b.rec.addedTables {
+		b.s.logf("table@build revert @%d", at.num)
+		f := b.s.getTableFile(at.num)
+		if err := f.Remove(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
+	defer c.release()
+
+	rec := &sessionRecord{}
+	rec.addCompPtr(c.level, c.imax)
+
+	if !noTrivial && c.trivial() {
+		t := c.tables[0][0]
+		db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
+		rec.delTable(c.level, t.file.Num())
+		rec.addTableFile(c.level+1, t)
+		db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
+			return db.s.commit(rec)
+		}, nil)
+		return
+	}
+
+	var stats [2]cStatsStaging
+	for i, tables := range c.tables {
+		for _, t := range tables {
+			stats[i].read += t.size
+			// Insert deleted tables into record
+			rec.delTable(c.level+i, t.file.Num())
+		}
+	}
+	sourceSize := int(stats[0].read + stats[1].read)
+	minSeq := db.minSeq()
+	db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)
+
+	b := &tableCompactionBuilder{
+		db:        db,
+		s:         db.s,
+		c:         c,
+		rec:       rec,
+		stat1:     &stats[1],
+		minSeq:    minSeq,
+		strict:    db.s.o.GetStrict(opt.StrictCompaction),
+		tableSize: db.s.o.GetCompactionTableSize(c.level + 1),
+	}
+	db.compactionTransact("table@build", b)
+
+	// Commit changes
+	db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) {
+		stats[1].startTimer()
+		defer stats[1].stopTimer()
+		return db.s.commit(rec)
+	}, nil)
+
+	resultSize := int(stats[1].write)
+	db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
+
+	// Save compaction stats
+	for i := range stats {
+		db.compStats[c.level+1].add(&stats[i])
+	}
+}
+
+func (db *DB) tableRangeCompaction(level int, umin, umax []byte) {
+	db.logf("table@compaction range L%d %q:%q", level, umin, umax)
+
+	if level >= 0 {
+		if c := db.s.getCompactionRange(level, umin, umax); c != nil {
+			db.tableCompaction(c, true)
+		}
+	} else {
+		v := db.s.version()
+		m := 1
+		for i, t := range v.tables[1:] {
+			if t.overlaps(db.s.icmp, umin, umax, false) {
+				m = i + 1
+			}
+		}
+		v.release()
+
+		for level := 0; level < m; level++ {
+			if c := db.s.getCompactionRange(level, umin, umax); c != nil {
+				db.tableCompaction(c, true)
+			}
+		}
+	}
+}
+
+func (db *DB) tableAutoCompaction() {
+	if c := db.s.pickCompaction(); c != nil {
+		db.tableCompaction(c, false)
+	}
+}
+
+func (db *DB) tableNeedCompaction() bool {
+	v := db.s.version()
+	defer v.release()
+	return v.needCompaction()
+}
+
+func (db *DB) pauseCompaction(ch chan<- struct{}) {
+	select {
+	case ch <- struct{}{}:
+	case _, _ = <-db.closeC:
+		db.compactionExitTransact()
+	}
+}
+
+type cCmd interface {
+	ack(err error)
+}
+
+type cIdle struct {
+	ackC chan<- error
+}
+
+func (r cIdle) ack(err error) {
+	if r.ackC != nil {
+		defer func() {
+			recover()
+		}()
+		r.ackC <- err
+	}
+}
+
+type cRange struct {
+	level    int
+	min, max []byte
+	ackC     chan<- error
+}
+
+func (r cRange) ack(err error) {
+	if r.ackC != nil {
+		defer func() {
+			recover()
+		}()
+		r.ackC <- err
+	}
+}
+
+// This will trigger auto compaction and/or wait for all compaction to be done.
+func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
+	ch := make(chan error)
+	defer close(ch)
+	// Send cmd.
+	select {
+	case compC <- cIdle{ch}:
+	case err = <-db.compErrC:
+		return
+	case _, _ = <-db.closeC:
+		return ErrClosed
+	}
+	// Wait cmd.
+	select {
+	case err = <-ch:
+	case err = <-db.compErrC:
+	case _, _ = <-db.closeC:
+		return ErrClosed
+	}
+	return err
+}
+
+// This will trigger auto compaction but will not wait for it.
+func (db *DB) compSendTrigger(compC chan<- cCmd) {
+	select {
+	case compC <- cIdle{}:
+	default:
+	}
+}
+
+// Send range compaction request.
+func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
+	ch := make(chan error)
+	defer close(ch)
+	// Send cmd.
+	select {
+	case compC <- cRange{level, min, max, ch}:
+	case err := <-db.compErrC:
+		return err
+	case _, _ = <-db.closeC:
+		return ErrClosed
+	}
+	// Wait cmd.
+	select {
+	case err = <-ch:
+	case err = <-db.compErrC:
+	case _, _ = <-db.closeC:
+		return ErrClosed
+	}
+	return err
+}
+
+func (db *DB) mCompaction() {
+	var x cCmd
+
+	defer func() {
+		if x := recover(); x != nil {
+			if x != errCompactionTransactExiting {
+				panic(x)
+			}
+		}
+		if x != nil {
+			x.ack(ErrClosed)
+		}
+		db.closeW.Done()
+	}()
+
+	for {
+		select {
+		case x = <-db.mcompCmdC:
+			switch x.(type) {
+			case cIdle:
+				db.memCompaction()
+				x.ack(nil)
+				x = nil
+			default:
+				panic("leveldb: unknown command")
+			}
+		case _, _ = <-db.closeC:
+			return
+		}
+	}
+}
+
+func (db *DB) tCompaction() {
+	var x cCmd
+	var ackQ []cCmd
+
+	defer func() {
+		if x := recover(); x != nil {
+			if x != errCompactionTransactExiting {
+				panic(x)
+			}
+		}
+		for i := range ackQ {
+			ackQ[i].ack(ErrClosed)
+			ackQ[i] = nil
+		}
+		if x != nil {
+			x.ack(ErrClosed)
+		}
+		db.closeW.Done()
+	}()
+
+	for {
+		if db.tableNeedCompaction() {
+			select {
+			case x = <-db.tcompCmdC:
+			case ch := <-db.tcompPauseC:
+				db.pauseCompaction(ch)
+				continue
+			case _, _ = <-db.closeC:
+				return
+			default:
+			}
+		} else {
+			for i := range ackQ {
+				ackQ[i].ack(nil)
+				ackQ[i] = nil
+			}
+			ackQ = ackQ[:0]
+			select {
+			case x = <-db.tcompCmdC:
+			case ch := <-db.tcompPauseC:
+				db.pauseCompaction(ch)
+				continue
+			case _, _ = <-db.closeC:
+				return
+			}
+		}
+		if x != nil {
+			switch cmd := x.(type) {
+			case cIdle:
+				ackQ = append(ackQ, x)
+			case cRange:
+				db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
+				x.ack(nil)
+			default:
+				panic("leveldb: unknown command")
+			}
+			x = nil
+		}
+		db.tableAutoCompaction()
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
new file mode 100644
index 0000000..a0a8d2b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
@@ -0,0 +1,350 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"errors"
+	"math/rand"
+	"runtime"
+	"sync"
+	"sync/atomic"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
+)
+
+type memdbReleaser struct {
+	once sync.Once
+	m    *memDB
+}
+
+func (mr *memdbReleaser) Release() {
+	mr.once.Do(func() {
+		mr.m.decref()
+	})
+}
+
+func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	em, fm := db.getMems()
+	v := db.s.version()
+
+	ti := v.getIterators(slice, ro)
+	n := len(ti) + 2
+	i := make([]iterator.Iterator, 0, n)
+	emi := em.NewIterator(slice)
+	emi.SetReleaser(&memdbReleaser{m: em})
+	i = append(i, emi)
+	if fm != nil {
+		fmi := fm.NewIterator(slice)
+		fmi.SetReleaser(&memdbReleaser{m: fm})
+		i = append(i, fmi)
+	}
+	i = append(i, ti...)
+	strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+	mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
+	mi.SetReleaser(&versionReleaser{v: v})
+	return mi
+}
+
+func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
+	var islice *util.Range
+	if slice != nil {
+		islice = &util.Range{}
+		if slice.Start != nil {
+			islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
+		}
+		if slice.Limit != nil {
+			islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
+		}
+	}
+	rawIter := db.newRawIterator(islice, ro)
+	iter := &dbIter{
+		db:     db,
+		icmp:   db.s.icmp,
+		iter:   rawIter,
+		seq:    seq,
+		strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
+		key:    make([]byte, 0),
+		value:  make([]byte, 0),
+	}
+	atomic.AddInt32(&db.aliveIters, 1)
+	runtime.SetFinalizer(iter, (*dbIter).Release)
+	return iter
+}
+
+func (db *DB) iterSamplingRate() int {
+	return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
+}
+
+type dir int
+
+const (
+	dirReleased dir = iota - 1
+	dirSOI
+	dirEOI
+	dirBackward
+	dirForward
+)
+
+// dbIter represents an iterator state over a database session.
+type dbIter struct {
+	db     *DB
+	icmp   *iComparer
+	iter   iterator.Iterator
+	seq    uint64
+	strict bool
+
+	smaplingGap int
+	dir         dir
+	key         []byte
+	value       []byte
+	err         error
+	releaser    util.Releaser
+}
+
+func (i *dbIter) sampleSeek() {
+	ikey := i.iter.Key()
+	i.smaplingGap -= len(ikey) + len(i.iter.Value())
+	for i.smaplingGap < 0 {
+		i.smaplingGap += i.db.iterSamplingRate()
+		i.db.sampleSeek(ikey)
+	}
+}
+
+func (i *dbIter) setErr(err error) {
+	i.err = err
+	i.key = nil
+	i.value = nil
+}
+
+func (i *dbIter) iterErr() {
+	if err := i.iter.Error(); err != nil {
+		i.setErr(err)
+	}
+}
+
+func (i *dbIter) Valid() bool {
+	return i.err == nil && i.dir > dirEOI
+}
+
+func (i *dbIter) First() bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.iter.First() {
+		i.dir = dirSOI
+		return i.next()
+	}
+	i.dir = dirEOI
+	i.iterErr()
+	return false
+}
+
+func (i *dbIter) Last() bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.iter.Last() {
+		return i.prev()
+	}
+	i.dir = dirSOI
+	i.iterErr()
+	return false
+}
+
+func (i *dbIter) Seek(key []byte) bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	ikey := newIkey(key, i.seq, ktSeek)
+	if i.iter.Seek(ikey) {
+		i.dir = dirSOI
+		return i.next()
+	}
+	i.dir = dirEOI
+	i.iterErr()
+	return false
+}
+
+func (i *dbIter) next() bool {
+	for {
+		if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+			i.sampleSeek()
+			if seq <= i.seq {
+				switch kt {
+				case ktDel:
+					// Skip deleted key.
+					i.key = append(i.key[:0], ukey...)
+					i.dir = dirForward
+				case ktVal:
+					if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
+						i.key = append(i.key[:0], ukey...)
+						i.value = append(i.value[:0], i.iter.Value()...)
+						i.dir = dirForward
+						return true
+					}
+				}
+			}
+		} else if i.strict {
+			i.setErr(kerr)
+			break
+		}
+		if !i.iter.Next() {
+			i.dir = dirEOI
+			i.iterErr()
+			break
+		}
+	}
+	return false
+}
+
+func (i *dbIter) Next() bool {
+	if i.dir == dirEOI || i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) {
+		i.dir = dirEOI
+		i.iterErr()
+		return false
+	}
+	return i.next()
+}
+
+func (i *dbIter) prev() bool {
+	i.dir = dirBackward
+	del := true
+	if i.iter.Valid() {
+		for {
+			if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+				i.sampleSeek()
+				if seq <= i.seq {
+					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
+						return true
+					}
+					del = (kt == ktDel)
+					if !del {
+						i.key = append(i.key[:0], ukey...)
+						i.value = append(i.value[:0], i.iter.Value()...)
+					}
+				}
+			} else if i.strict {
+				i.setErr(kerr)
+				return false
+			}
+			if !i.iter.Prev() {
+				break
+			}
+		}
+	}
+	if del {
+		i.dir = dirSOI
+		i.iterErr()
+		return false
+	}
+	return true
+}
+
+func (i *dbIter) Prev() bool {
+	if i.dir == dirSOI || i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	switch i.dir {
+	case dirEOI:
+		return i.Last()
+	case dirForward:
+		for i.iter.Prev() {
+			if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
+				i.sampleSeek()
+				if i.icmp.uCompare(ukey, i.key) < 0 {
+					goto cont
+				}
+			} else if i.strict {
+				i.setErr(kerr)
+				return false
+			}
+		}
+		i.dir = dirSOI
+		i.iterErr()
+		return false
+	}
+
+cont:
+	return i.prev()
+}
+
+func (i *dbIter) Key() []byte {
+	if i.err != nil || i.dir <= dirEOI {
+		return nil
+	}
+	return i.key
+}
+
+func (i *dbIter) Value() []byte {
+	if i.err != nil || i.dir <= dirEOI {
+		return nil
+	}
+	return i.value
+}
+
+func (i *dbIter) Release() {
+	if i.dir != dirReleased {
+		// Clear the finalizer.
+		runtime.SetFinalizer(i, nil)
+
+		if i.releaser != nil {
+			i.releaser.Release()
+			i.releaser = nil
+		}
+
+		i.dir = dirReleased
+		i.key = nil
+		i.value = nil
+		i.iter.Release()
+		i.iter = nil
+		atomic.AddInt32(&i.db.aliveIters, -1)
+		i.db = nil
+	}
+}
+
+func (i *dbIter) SetReleaser(releaser util.Releaser) {
+	if i.dir == dirReleased {
+		panic(util.ErrReleased)
+	}
+	if i.releaser != nil && releaser != nil {
+		panic(util.ErrHasReleaser)
+	}
+	i.releaser = releaser
+}
+
+func (i *dbIter) Error() error {
+	return i.err
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
new file mode 100644
index 0000000..c81121b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
@@ -0,0 +1,183 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"container/list"
+	"fmt"
+	"runtime"
+	"sync"
+	"sync/atomic"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type snapshotElement struct {
+	seq uint64
+	ref int
+	e   *list.Element
+}
+
+// Acquires a snapshot, based on latest sequence.
+func (db *DB) acquireSnapshot() *snapshotElement {
+	db.snapsMu.Lock()
+	defer db.snapsMu.Unlock()
+
+	seq := db.getSeq()
+
+	if e := db.snapsList.Back(); e != nil {
+		se := e.Value.(*snapshotElement)
+		if se.seq == seq {
+			se.ref++
+			return se
+		} else if seq < se.seq {
+			panic("leveldb: sequence number is not increasing")
+		}
+	}
+	se := &snapshotElement{seq: seq, ref: 1}
+	se.e = db.snapsList.PushBack(se)
+	return se
+}
+
+// Releases given snapshot element.
+func (db *DB) releaseSnapshot(se *snapshotElement) {
+	db.snapsMu.Lock()
+	defer db.snapsMu.Unlock()
+
+	se.ref--
+	if se.ref == 0 {
+		db.snapsList.Remove(se.e)
+		se.e = nil
+	} else if se.ref < 0 {
+		panic("leveldb: Snapshot: negative element reference")
+	}
+}
+
+// Gets the minimum sequence number that is not being snapshotted.
+func (db *DB) minSeq() uint64 {
+	db.snapsMu.Lock()
+	defer db.snapsMu.Unlock()
+
+	if e := db.snapsList.Front(); e != nil {
+		return e.Value.(*snapshotElement).seq
+	}
+
+	return db.getSeq()
+}
+
+// Snapshot is a DB snapshot.
+type Snapshot struct {
+	db       *DB
+	elem     *snapshotElement
+	mu       sync.RWMutex
+	released bool
+}
+
+// Creates new snapshot object.
+func (db *DB) newSnapshot() *Snapshot {
+	snap := &Snapshot{
+		db:   db,
+		elem: db.acquireSnapshot(),
+	}
+	atomic.AddInt32(&db.aliveSnaps, 1)
+	runtime.SetFinalizer(snap, (*Snapshot).Release)
+	return snap
+}
+
+func (snap *Snapshot) String() string {
+	return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.get(key, snap.elem.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.has(key, snap.elem.seq, ro)
+}
+
+// NewIterator returns an iterator for the snapshot of the underlying DB.
+// The returned iterator is not goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+// Releasing the snapshot doesn't mean releasing the iterator too, the
+// iterator would be still valid until released.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := snap.db.ok(); err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+	if snap.released {
+		return iterator.NewEmptyIterator(ErrSnapshotReleased)
+	}
+	// Since iterator already hold version ref, it doesn't need to
+	// hold snapshot ref.
+	return snap.db.newIterator(snap.elem.seq, slice, ro)
+}
+
+// Release releases the snapshot. This will not release any returned
+// iterators, the iterators would still be valid until released or the
+// underlying DB is closed.
+//
+// Other methods should not be called after the snapshot has been released.
+func (snap *Snapshot) Release() {
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+
+	if !snap.released {
+		// Clear the finalizer.
+		runtime.SetFinalizer(snap, nil)
+
+		snap.released = true
+		snap.db.releaseSnapshot(snap.elem)
+		atomic.AddInt32(&snap.db.aliveSnaps, -1)
+		snap.db = nil
+		snap.elem = nil
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
new file mode 100644
index 0000000..56f1adf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -0,0 +1,211 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"sync/atomic"
+	"time"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
+)
+
+type memDB struct {
+	db *DB
+	*memdb.DB
+	ref int32
+}
+
+func (m *memDB) incref() {
+	atomic.AddInt32(&m.ref, 1)
+}
+
+func (m *memDB) decref() {
+	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
+		// Only put back memdb with std capacity.
+		if m.Capacity() == m.db.s.o.GetWriteBuffer() {
+			m.Reset()
+			m.db.mpoolPut(m.DB)
+		}
+		m.db = nil
+		m.DB = nil
+	} else if ref < 0 {
+		panic("negative memdb ref")
+	}
+}
+
+// Get latest sequence number.
+func (db *DB) getSeq() uint64 {
+	return atomic.LoadUint64(&db.seq)
+}
+
+// Atomically adds delta to seq.
+func (db *DB) addSeq(delta uint64) {
+	atomic.AddUint64(&db.seq, delta)
+}
+
+func (db *DB) sampleSeek(ikey iKey) {
+	v := db.s.version()
+	if v.sampleSeek(ikey) {
+		// Trigger table compaction.
+		db.compSendTrigger(db.tcompCmdC)
+	}
+	v.release()
+}
+
+func (db *DB) mpoolPut(mem *memdb.DB) {
+	defer func() {
+		recover()
+	}()
+	select {
+	case db.memPool <- mem:
+	default:
+	}
+}
+
+func (db *DB) mpoolGet() *memdb.DB {
+	select {
+	case mem := <-db.memPool:
+		return mem
+	default:
+		return nil
+	}
+}
+
+func (db *DB) mpoolDrain() {
+	ticker := time.NewTicker(30 * time.Second)
+	for {
+		select {
+		case <-ticker.C:
+			select {
+			case <-db.memPool:
+			default:
+			}
+		case _, _ = <-db.closeC:
+			close(db.memPool)
+			return
+		}
+	}
+}
+
+// Create new memdb and freeze the old one; needs external synchronization.
+// newMem only called synchronously by the writer.
+func (db *DB) newMem(n int) (mem *memDB, err error) {
+	num := db.s.allocFileNum()
+	file := db.s.getJournalFile(num)
+	w, err := file.Create()
+	if err != nil {
+		db.s.reuseFileNum(num)
+		return
+	}
+
+	db.memMu.Lock()
+	defer db.memMu.Unlock()
+
+	if db.frozenMem != nil {
+		panic("still has frozen mem")
+	}
+
+	if db.journal == nil {
+		db.journal = journal.NewWriter(w)
+	} else {
+		db.journal.Reset(w)
+		db.journalWriter.Close()
+		db.frozenJournalFile = db.journalFile
+	}
+	db.journalWriter = w
+	db.journalFile = file
+	db.frozenMem = db.mem
+	mdb := db.mpoolGet()
+	if mdb == nil || mdb.Capacity() < n {
+		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
+	}
+	mem = &memDB{
+		db:  db,
+		DB:  mdb,
+		ref: 2,
+	}
+	db.mem = mem
+	// The seq is only incremented by the writer. And whoever called newMem
+	// should hold the write lock, so no additional synchronization is needed here.
+	db.frozenSeq = db.seq
+	return
+}
+
+// Get all memdbs.
+func (db *DB) getMems() (e, f *memDB) {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.mem == nil {
+		panic("nil effective mem")
+	}
+	db.mem.incref()
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
+	return db.mem, db.frozenMem
+}
+
+// Get effective memdb.
+func (db *DB) getEffectiveMem() *memDB {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.mem == nil {
+		panic("nil effective mem")
+	}
+	db.mem.incref()
+	return db.mem
+}
+
+// Check whether we have a frozen memdb.
+func (db *DB) hasFrozenMem() bool {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	return db.frozenMem != nil
+}
+
+// Get frozen memdb.
+func (db *DB) getFrozenMem() *memDB {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
+	return db.frozenMem
+}
+
+// Drop frozen memdb; assume that frozen memdb isn't nil.
+func (db *DB) dropFrozenMem() {
+	db.memMu.Lock()
+	if err := db.frozenJournalFile.Remove(); err != nil {
+		db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err)
+	} else {
+		db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
+	}
+	db.frozenJournalFile = nil
+	db.frozenMem.decref()
+	db.frozenMem = nil
+	db.memMu.Unlock()
+}
+
+// Set closed flag; return true if not already closed.
+func (db *DB) setClosed() bool {
+	return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
+}
+
+// Check whether DB was closed.
+func (db *DB) isClosed() bool {
+	return atomic.LoadUint32(&db.closed) != 0
+}
+
+// Check read ok status.
+func (db *DB) ok() error {
+	if db.isClosed() {
+		return ErrClosed
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
new file mode 100644
index 0000000..f2d57b2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
@@ -0,0 +1,2701 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bytes"
+	"container/list"
+	crand "crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"testing"
+	"time"
+	"unsafe"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func tkey(i int) []byte {
+	return []byte(fmt.Sprintf("%016d", i))
+}
+
+func tval(seed, n int) []byte {
+	r := rand.New(rand.NewSource(int64(seed)))
+	return randomString(r, n)
+}
+
+type dbHarness struct {
+	t *testing.T
+
+	stor *testStorage
+	db   *DB
+	o    *opt.Options
+	ro   *opt.ReadOptions
+	wo   *opt.WriteOptions
+}
+
+func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness {
+	h := new(dbHarness)
+	h.init(t, o)
+	return h
+}
+
+func newDbHarness(t *testing.T) *dbHarness {
+	return newDbHarnessWopt(t, &opt.Options{})
+}
+
+func (h *dbHarness) init(t *testing.T, o *opt.Options) {
+	h.t = t
+	h.stor = newTestStorage(t)
+	h.o = o
+	h.ro = nil
+	h.wo = nil
+
+	if err := h.openDB0(); err != nil {
+		// So that it will come after fatal message.
+		defer h.stor.Close()
+		h.t.Fatal("Open (init): got error: ", err)
+	}
+}
+
+func (h *dbHarness) openDB0() (err error) {
+	h.t.Log("opening DB")
+	h.db, err = Open(h.stor, h.o)
+	return
+}
+
+func (h *dbHarness) openDB() {
+	if err := h.openDB0(); err != nil {
+		h.t.Fatal("Open: got error: ", err)
+	}
+}
+
+func (h *dbHarness) closeDB0() error {
+	h.t.Log("closing DB")
+	return h.db.Close()
+}
+
+func (h *dbHarness) closeDB() {
+	if err := h.closeDB0(); err != nil {
+		h.t.Error("Close: got error: ", err)
+	}
+	h.stor.CloseCheck()
+	runtime.GC()
+}
+
+func (h *dbHarness) reopenDB() {
+	h.closeDB()
+	h.openDB()
+}
+
+func (h *dbHarness) close() {
+	h.closeDB0()
+	h.db = nil
+	h.stor.Close()
+	h.stor = nil
+	runtime.GC()
+}
+
+func (h *dbHarness) openAssert(want bool) {
+	db, err := Open(h.stor, h.o)
+	if err != nil {
+		if want {
+			h.t.Error("Open: assert: got error: ", err)
+		} else {
+			h.t.Log("Open: assert: got error (expected): ", err)
+		}
+	} else {
+		if !want {
+			h.t.Error("Open: assert: expect error")
+		}
+		db.Close()
+	}
+}
+
+func (h *dbHarness) write(batch *Batch) {
+	if err := h.db.Write(batch, h.wo); err != nil {
+		h.t.Error("Write: got error: ", err)
+	}
+}
+
+func (h *dbHarness) put(key, value string) {
+	if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil {
+		h.t.Error("Put: got error: ", err)
+	}
+}
+
+func (h *dbHarness) putMulti(n int, low, hi string) {
+	for i := 0; i < n; i++ {
+		h.put(low, "begin")
+		h.put(hi, "end")
+		h.compactMem()
+	}
+}
+
+func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
+	t := h.t
+	db := h.db
+
+	var (
+		maxOverlaps uint64
+		maxLevel    int
+	)
+	v := db.s.version()
+	for i, tt := range v.tables[1 : len(v.tables)-1] {
+		level := i + 1
+		next := v.tables[level+1]
+		for _, t := range tt {
+			r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false)
+			sum := r.size()
+			if sum > maxOverlaps {
+				maxOverlaps = sum
+				maxLevel = level
+			}
+		}
+	}
+	v.release()
+
+	if maxOverlaps > want {
+		t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel)
+	} else {
+		t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want)
+	}
+}
+
+func (h *dbHarness) delete(key string) {
+	t := h.t
+	db := h.db
+
+	err := db.Delete([]byte(key), h.wo)
+	if err != nil {
+		t.Error("Delete: got error: ", err)
+	}
+}
+
+func (h *dbHarness) assertNumKeys(want int) {
+	iter := h.db.NewIterator(nil, h.ro)
+	defer iter.Release()
+	got := 0
+	for iter.Next() {
+		got++
+	}
+	if err := iter.Error(); err != nil {
+		h.t.Error("assertNumKeys: ", err)
+	}
+	if want != got {
+		h.t.Errorf("assertNumKeys: want=%d got=%d", want, got)
+	}
+}
+
+func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) {
+	t := h.t
+	v, err := db.Get([]byte(key), h.ro)
+	switch err {
+	case ErrNotFound:
+		if expectFound {
+			t.Errorf("Get: key '%s' not found, want found", key)
+		}
+	case nil:
+		found = true
+		if !expectFound {
+			t.Errorf("Get: key '%s' found, want not found", key)
+		}
+	default:
+		t.Error("Get: got error: ", err)
+	}
+	return
+}
+
+func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) {
+	return h.getr(h.db, key, expectFound)
+}
+
+func (h *dbHarness) getValr(db Reader, key, value string) {
+	t := h.t
+	found, r := h.getr(db, key, true)
+	if !found {
+		return
+	}
+	rval := string(r)
+	if rval != value {
+		t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value)
+	}
+}
+
+func (h *dbHarness) getVal(key, value string) {
+	h.getValr(h.db, key, value)
+}
+
+func (h *dbHarness) allEntriesFor(key, want string) {
+	t := h.t
+	db := h.db
+	s := db.s
+
+	ikey := newIkey([]byte(key), kMaxSeq, ktVal)
+	iter := db.newRawIterator(nil, nil)
+	if !iter.Seek(ikey) && iter.Error() != nil {
+		t.Error("AllEntries: error during seek, err: ", iter.Error())
+		return
+	}
+	res := "[ "
+	first := true
+	for iter.Valid() {
+		if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil {
+			if s.icmp.uCompare(ikey.ukey(), ukey) != 0 {
+				break
+			}
+			if !first {
+				res += ", "
+			}
+			first = false
+			switch kt {
+			case ktVal:
+				res += string(iter.Value())
+			case ktDel:
+				res += "DEL"
+			}
+		} else {
+			if !first {
+				res += ", "
+			}
+			first = false
+			res += "CORRUPTED"
+		}
+		iter.Next()
+	}
+	if !first {
+		res += " "
+	}
+	res += "]"
+	if res != want {
+		t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want)
+	}
+}
+
+// Return a string that contains all key,value pairs in order,
+// formatted like "(k1->v1)(k2->v2)".
+func (h *dbHarness) getKeyVal(want string) {
+	t := h.t
+	db := h.db
+
+	s, err := db.GetSnapshot()
+	if err != nil {
+		t.Fatal("GetSnapshot: got error: ", err)
+	}
+	res := ""
+	iter := s.NewIterator(nil, nil)
+	for iter.Next() {
+		res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value()))
+	}
+	iter.Release()
+
+	if res != want {
+		t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want)
+	}
+	s.Release()
+}
+
+func (h *dbHarness) waitCompaction() {
+	t := h.t
+	db := h.db
+	if err := db.compSendIdle(db.tcompCmdC); err != nil {
+		t.Error("compaction error: ", err)
+	}
+}
+
+func (h *dbHarness) waitMemCompaction() {
+	t := h.t
+	db := h.db
+
+	if err := db.compSendIdle(db.mcompCmdC); err != nil {
+		t.Error("compaction error: ", err)
+	}
+}
+
+func (h *dbHarness) compactMem() {
+	t := h.t
+	db := h.db
+
+	t.Log("starting memdb compaction")
+
+	db.writeLockC <- struct{}{}
+	defer func() {
+		<-db.writeLockC
+	}()
+
+	if _, err := db.rotateMem(0); err != nil {
+		t.Error("compaction error: ", err)
+	}
+	if err := db.compSendIdle(db.mcompCmdC); err != nil {
+		t.Error("compaction error: ", err)
+	}
+
+	if h.totalTables() == 0 {
+		t.Error("zero tables after mem compaction")
+	}
+
+	t.Log("memdb compaction done")
+}
+
+func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) {
+	t := h.t
+	db := h.db
+
+	var _min, _max []byte
+	if min != "" {
+		_min = []byte(min)
+	}
+	if max != "" {
+		_max = []byte(max)
+	}
+
+	t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max)
+
+	if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil {
+		if wanterr {
+			t.Log("CompactRangeAt: got error (expected): ", err)
+		} else {
+			t.Error("CompactRangeAt: got error: ", err)
+		}
+	} else if wanterr {
+		t.Error("CompactRangeAt: expect error")
+	}
+
+	t.Log("table range compaction done")
+}
+
+func (h *dbHarness) compactRangeAt(level int, min, max string) {
+	h.compactRangeAtErr(level, min, max, false)
+}
+
+func (h *dbHarness) compactRange(min, max string) {
+	t := h.t
+	db := h.db
+
+	t.Logf("starting DB range compaction: min=%q, max=%q", min, max)
+
+	var r util.Range
+	if min != "" {
+		r.Start = []byte(min)
+	}
+	if max != "" {
+		r.Limit = []byte(max)
+	}
+	if err := db.CompactRange(r); err != nil {
+		t.Error("CompactRange: got error: ", err)
+	}
+
+	t.Log("DB range compaction done")
+}
+
+func (h *dbHarness) sizeOf(start, limit string) uint64 {
+	sz, err := h.db.SizeOf([]util.Range{
+		{[]byte(start), []byte(limit)},
+	})
+	if err != nil {
+		h.t.Error("SizeOf: got error: ", err)
+	}
+	return sz.Sum()
+}
+
+func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
+	sz := h.sizeOf(start, limit)
+	if sz < low || sz > hi {
+		h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
+			shorten(start), shorten(limit), low, hi, sz)
+	}
+}
+
+func (h *dbHarness) getSnapshot() (s *Snapshot) {
+	s, err := h.db.GetSnapshot()
+	if err != nil {
+		h.t.Fatal("GetSnapshot: got error: ", err)
+	}
+	return
+}
+func (h *dbHarness) tablesPerLevel(want string) {
+	res := ""
+	nz := 0
+	v := h.db.s.version()
+	for level, tt := range v.tables {
+		if level > 0 {
+			res += ","
+		}
+		res += fmt.Sprint(len(tt))
+		if len(tt) > 0 {
+			nz = len(res)
+		}
+	}
+	v.release()
+	res = res[:nz]
+	if res != want {
+		h.t.Errorf("invalid tables len, want=%s, got=%s", want, res)
+	}
+}
+
+func (h *dbHarness) totalTables() (n int) {
+	v := h.db.s.version()
+	for _, tt := range v.tables {
+		n += len(tt)
+	}
+	v.release()
+	return
+}
+
+type keyValue interface {
+	Key() []byte
+	Value() []byte
+}
+
+func testKeyVal(t *testing.T, kv keyValue, want string) {
+	res := string(kv.Key()) + "->" + string(kv.Value())
+	if res != want {
+		t.Errorf("invalid key/value, want=%q, got=%q", want, res)
+	}
+}
+
+func numKey(num int) string {
+	return fmt.Sprintf("key%06d", num)
+}
+
+var _bloom_filter = filter.NewBloomFilter(10)
+
+func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) {
+	for i := 0; i < 4; i++ {
+		func() {
+			switch i {
+			case 0:
+			case 1:
+				if o == nil {
+					o = &opt.Options{Filter: _bloom_filter}
+				} else {
+					old := o
+					o = &opt.Options{}
+					*o = *old
+					o.Filter = _bloom_filter
+				}
+			case 2:
+				if o == nil {
+					o = &opt.Options{Compression: opt.NoCompression}
+				} else {
+					old := o
+					o = &opt.Options{}
+					*o = *old
+					o.Compression = opt.NoCompression
+				}
+			}
+			h := newDbHarnessWopt(t, o)
+			defer h.close()
+			switch i {
+			case 3:
+				h.reopenDB()
+			}
+			f(h)
+		}()
+	}
+}
+
+func trun(t *testing.T, f func(h *dbHarness)) {
+	truno(t, nil, f)
+}
+
+func testAligned(t *testing.T, name string, offset uintptr) {
+	if offset%8 != 0 {
+		t.Errorf("field %s offset is not 64-bit aligned", name)
+	}
+}
+
+func Test_FieldsAligned(t *testing.T) {
+	p1 := new(DB)
+	testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq))
+	p2 := new(session)
+	testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum))
+	testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum))
+	testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum))
+	testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum))
+}
+
+func TestDB_Locking(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.stor.Close()
+	h.openAssert(false)
+	h.closeDB()
+	h.openAssert(true)
+}
+
+func TestDB_Empty(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		h.get("foo", false)
+
+		h.reopenDB()
+		h.get("foo", false)
+	})
+}
+
+func TestDB_ReadWrite(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		h.put("foo", "v1")
+		h.getVal("foo", "v1")
+		h.put("bar", "v2")
+		h.put("foo", "v3")
+		h.getVal("foo", "v3")
+		h.getVal("bar", "v2")
+
+		h.reopenDB()
+		h.getVal("foo", "v3")
+		h.getVal("bar", "v2")
+	})
+}
+
+func TestDB_PutDeleteGet(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		h.put("foo", "v1")
+		h.getVal("foo", "v1")
+		h.put("foo", "v2")
+		h.getVal("foo", "v2")
+		h.delete("foo")
+		h.get("foo", false)
+
+		h.reopenDB()
+		h.get("foo", false)
+	})
+}
+
+func TestDB_EmptyBatch(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.get("foo", false)
+	err := h.db.Write(new(Batch), h.wo)
+	if err != nil {
+		t.Error("writing empty batch yield error: ", err)
+	}
+	h.get("foo", false)
+}
+
+func TestDB_GetFromFrozen(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100})
+	defer h.close()
+
+	h.put("foo", "v1")
+	h.getVal("foo", "v1")
+
+	h.stor.DelaySync(storage.TypeTable)      // Block sync calls
+	h.put("k1", strings.Repeat("x", 100000)) // Fill memtable
+	h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction
+	for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ {
+		time.Sleep(10 * time.Microsecond)
+	}
+	if h.db.getFrozenMem() == nil {
+		h.stor.ReleaseSync(storage.TypeTable)
+		t.Fatal("No frozen mem")
+	}
+	h.getVal("foo", "v1")
+	h.stor.ReleaseSync(storage.TypeTable) // Release sync calls
+
+	h.reopenDB()
+	h.getVal("foo", "v1")
+	h.get("k1", true)
+	h.get("k2", true)
+}
+
+func TestDB_GetFromTable(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		h.put("foo", "v1")
+		h.compactMem()
+		h.getVal("foo", "v1")
+	})
+}
+
+func TestDB_GetSnapshot(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		bar := strings.Repeat("b", 200)
+		h.put("foo", "v1")
+		h.put(bar, "v1")
+
+		snap, err := h.db.GetSnapshot()
+		if err != nil {
+			t.Fatal("GetSnapshot: got error: ", err)
+		}
+
+		h.put("foo", "v2")
+		h.put(bar, "v2")
+
+		h.getVal("foo", "v2")
+		h.getVal(bar, "v2")
+		h.getValr(snap, "foo", "v1")
+		h.getValr(snap, bar, "v1")
+
+		h.compactMem()
+
+		h.getVal("foo", "v2")
+		h.getVal(bar, "v2")
+		h.getValr(snap, "foo", "v1")
+		h.getValr(snap, bar, "v1")
+
+		snap.Release()
+
+		h.reopenDB()
+		h.getVal("foo", "v2")
+		h.getVal(bar, "v2")
+	})
+}
+
+func TestDB_GetLevel0Ordering(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		for i := 0; i < 4; i++ {
+			h.put("bar", fmt.Sprintf("b%d", i))
+			h.put("foo", fmt.Sprintf("v%d", i))
+			h.compactMem()
+		}
+		h.getVal("foo", "v3")
+		h.getVal("bar", "b3")
+
+		v := h.db.s.version()
+		t0len := v.tLen(0)
+		v.release()
+		if t0len < 2 {
+			t.Errorf("level-0 tables is less than 2, got %d", t0len)
+		}
+
+		h.reopenDB()
+		h.getVal("foo", "v3")
+		h.getVal("bar", "b3")
+	})
+}
+
+func TestDB_GetOrderedByLevels(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		h.put("foo", "v1")
+		h.compactMem()
+		h.compactRange("a", "z")
+		h.getVal("foo", "v1")
+		h.put("foo", "v2")
+		h.compactMem()
+		h.getVal("foo", "v2")
+	})
+}
+
+func TestDB_GetPicksCorrectFile(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		// Arrange to have multiple files in a non-level-0 level.
+		h.put("a", "va")
+		h.compactMem()
+		h.compactRange("a", "b")
+		h.put("x", "vx")
+		h.compactMem()
+		h.compactRange("x", "y")
+		h.put("f", "vf")
+		h.compactMem()
+		h.compactRange("f", "g")
+
+		h.getVal("a", "va")
+		h.getVal("f", "vf")
+		h.getVal("x", "vx")
+
+		h.compactRange("", "")
+		h.getVal("a", "va")
+		h.getVal("f", "vf")
+		h.getVal("x", "vx")
+	})
+}
+
+func TestDB_GetEncountersEmptyLevel(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		// Arrange for the following to happen:
+		//   * sstable A in level 0
+		//   * nothing in level 1
+		//   * sstable B in level 2
+		// Then do enough Get() calls to arrange for an automatic compaction
+		// of sstable A.  A bug would cause the compaction to be marked as
+		// occurring at level 1 (instead of the correct level 0).
+
+		// Step 1: First place sstables in levels 0 and 2
+		for i := 0; ; i++ {
+			if i >= 100 {
+				t.Fatal("could not fill levels-0 and level-2")
+			}
+			v := h.db.s.version()
+			if v.tLen(0) > 0 && v.tLen(2) > 0 {
+				v.release()
+				break
+			}
+			v.release()
+			h.put("a", "begin")
+			h.put("z", "end")
+			h.compactMem()
+
+			h.getVal("a", "begin")
+			h.getVal("z", "end")
+		}
+
+		// Step 2: clear level 1 if necessary.
+		h.compactRangeAt(1, "", "")
+		h.tablesPerLevel("1,0,1")
+
+		h.getVal("a", "begin")
+		h.getVal("z", "end")
+
+		// Step 3: read a bunch of times
+		for i := 0; i < 200; i++ {
+			h.get("missing", false)
+		}
+
+		// Step 4: Wait for compaction to finish
+		h.waitCompaction()
+
+		v := h.db.s.version()
+		if v.tLen(0) > 0 {
+			t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
+		}
+		v.release()
+
+		h.getVal("a", "begin")
+		h.getVal("z", "end")
+	})
+}
+
+func TestDB_IterMultiWithDelete(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		h.put("a", "va")
+		h.put("b", "vb")
+		h.put("c", "vc")
+		h.delete("b")
+		h.get("b", false)
+
+		iter := h.db.NewIterator(nil, nil)
+		iter.Seek([]byte("c"))
+		testKeyVal(t, iter, "c->vc")
+		iter.Prev()
+		testKeyVal(t, iter, "a->va")
+		iter.Release()
+
+		h.compactMem()
+
+		iter = h.db.NewIterator(nil, nil)
+		iter.Seek([]byte("c"))
+		testKeyVal(t, iter, "c->vc")
+		iter.Prev()
+		testKeyVal(t, iter, "a->va")
+		iter.Release()
+	})
+}
+
+func TestDB_IteratorPinsRef(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.put("foo", "hello")
+
+	// Get iterator that will yield the current contents of the DB.
+	iter := h.db.NewIterator(nil, nil)
+
+	// Write to force compactions
+	h.put("foo", "newvalue1")
+	for i := 0; i < 100; i++ {
+		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
+	}
+	h.put("foo", "newvalue2")
+
+	iter.First()
+	testKeyVal(t, iter, "foo->hello")
+	if iter.Next() {
+		t.Errorf("expect eof")
+	}
+	iter.Release()
+}
+
+func TestDB_Recover(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		h.put("foo", "v1")
+		h.put("baz", "v5")
+
+		h.reopenDB()
+		h.getVal("foo", "v1")
+
+		h.getVal("foo", "v1")
+		h.getVal("baz", "v5")
+		h.put("bar", "v2")
+		h.put("foo", "v3")
+
+		h.reopenDB()
+		h.getVal("foo", "v3")
+		h.put("foo", "v4")
+		h.getVal("foo", "v4")
+		h.getVal("bar", "v2")
+		h.getVal("baz", "v5")
+	})
+}
+
+func TestDB_RecoverWithEmptyJournal(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		h.put("foo", "v1")
+		h.put("foo", "v2")
+
+		h.reopenDB()
+		h.reopenDB()
+		h.put("foo", "v3")
+
+		h.reopenDB()
+		h.getVal("foo", "v3")
+	})
+}
+
+func TestDB_RecoverDuringMemtableCompaction(t *testing.T) {
+	truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) {
+
+		h.stor.DelaySync(storage.TypeTable)
+		h.put("big1", strings.Repeat("x", 10000000))
+		h.put("big2", strings.Repeat("y", 1000))
+		h.put("bar", "v2")
+		h.stor.ReleaseSync(storage.TypeTable)
+
+		h.reopenDB()
+		h.getVal("bar", "v2")
+		h.getVal("big1", strings.Repeat("x", 10000000))
+		h.getVal("big2", strings.Repeat("y", 1000))
+	})
+}
+
+func TestDB_MinorCompactionsHappen(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000})
+	defer h.close()
+
+	n := 500
+
+	key := func(i int) string {
+		return fmt.Sprintf("key%06d", i)
+	}
+
+	for i := 0; i < n; i++ {
+		h.put(key(i), key(i)+strings.Repeat("v", 1000))
+	}
+
+	for i := 0; i < n; i++ {
+		h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
+	}
+
+	h.reopenDB()
+	for i := 0; i < n; i++ {
+		h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
+	}
+}
+
+func TestDB_RecoverWithLargeJournal(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.put("big1", strings.Repeat("1", 200000))
+	h.put("big2", strings.Repeat("2", 200000))
+	h.put("small3", strings.Repeat("3", 10))
+	h.put("small4", strings.Repeat("4", 10))
+	h.tablesPerLevel("")
+
+	// Make sure that if we re-open with a small write buffer size that
+	// we flush table files in the middle of a large journal file.
+	h.o.WriteBuffer = 100000
+	h.reopenDB()
+	h.getVal("big1", strings.Repeat("1", 200000))
+	h.getVal("big2", strings.Repeat("2", 200000))
+	h.getVal("small3", strings.Repeat("3", 10))
+	h.getVal("small4", strings.Repeat("4", 10))
+	v := h.db.s.version()
+	if v.tLen(0) <= 1 {
+		t.Errorf("tables-0 less than one")
+	}
+	v.release()
+}
+
+func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		WriteBuffer: 10000000,
+		Compression: opt.NoCompression,
+	})
+	defer h.close()
+
+	v := h.db.s.version()
+	if v.tLen(0) > 0 {
+		t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
+	}
+	v.release()
+
+	n := 80
+
+	// Write 8MB (80 values, each 100K)
+	for i := 0; i < n; i++ {
+		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
+	}
+
+	// Reopening moves updates to level-0
+	h.reopenDB()
+	h.compactRangeAt(0, "", "")
+
+	v = h.db.s.version()
+	if v.tLen(0) > 0 {
+		t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
+	}
+	if v.tLen(1) <= 1 {
+		t.Errorf("level-1 tables less than 1, got %d", v.tLen(1))
+	}
+	v.release()
+
+	for i := 0; i < n; i++ {
+		h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
+	}
+}
+
+func TestDB_RepeatedWritesToSameKey(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
+	defer h.close()
+
+	maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
+
+	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
+	for i := 0; i < 5*maxTables; i++ {
+		h.put("key", value)
+		n := h.totalTables()
+		if n > maxTables {
+			t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
+		}
+	}
+}
+
+func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
+	defer h.close()
+
+	h.reopenDB()
+
+	maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
+
+	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
+	for i := 0; i < 5*maxTables; i++ {
+		h.put("key", value)
+		n := h.totalTables()
+		if n > maxTables {
+			t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
+		}
+	}
+}
+
+func TestDB_SparseMerge(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
+	defer h.close()
+
+	h.putMulti(h.o.GetNumLevel(), "A", "Z")
+
+	// Suppose there is:
+	//    small amount of data with prefix A
+	//    large amount of data with prefix B
+	//    small amount of data with prefix C
+	// and that recent updates have made small changes to all three prefixes.
+	// Check that we do not do a compaction that merges all of B in one shot.
+	h.put("A", "va")
+	value := strings.Repeat("x", 1000)
+	for i := 0; i < 100000; i++ {
+		h.put(fmt.Sprintf("B%010d", i), value)
+	}
+	h.put("C", "vc")
+	h.compactMem()
+	h.compactRangeAt(0, "", "")
+	h.waitCompaction()
+
+	// Make sparse update
+	h.put("A", "va2")
+	h.put("B100", "bvalue2")
+	h.put("C", "vc2")
+	h.compactMem()
+
+	h.waitCompaction()
+	h.maxNextLevelOverlappingBytes(20 * 1048576)
+	h.compactRangeAt(0, "", "")
+	h.waitCompaction()
+	h.maxNextLevelOverlappingBytes(20 * 1048576)
+	h.compactRangeAt(1, "", "")
+	h.waitCompaction()
+	h.maxNextLevelOverlappingBytes(20 * 1048576)
+}
+
+func TestDB_SizeOf(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		Compression: opt.NoCompression,
+		WriteBuffer: 10000000,
+	})
+	defer h.close()
+
+	h.sizeAssert("", "xyz", 0, 0)
+	h.reopenDB()
+	h.sizeAssert("", "xyz", 0, 0)
+
+	// Write 8MB (80 values, each 100K)
+	n := 80
+	s1 := 100000
+	s2 := 105000
+
+	for i := 0; i < n; i++ {
+		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10))
+	}
+
+	// 0 because SizeOf() does not account for memtable space
+	h.sizeAssert("", numKey(50), 0, 0)
+
+	for r := 0; r < 3; r++ {
+		h.reopenDB()
+
+		for cs := 0; cs < n; cs += 10 {
+			for i := 0; i < n; i += 10 {
+				h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i))
+				h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1)))
+				h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10))
+			}
+
+			h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50))
+			h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50))
+
+			h.compactRangeAt(0, numKey(cs), numKey(cs+9))
+		}
+
+		v := h.db.s.version()
+		if v.tLen(0) != 0 {
+			t.Errorf("level-0 tables was not zero, got %d", v.tLen(0))
+		}
+		if v.tLen(1) == 0 {
+			t.Error("level-1 tables was zero")
+		}
+		v.release()
+	}
+}
+
+func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
+	defer h.close()
+
+	sizes := []uint64{
+		10000,
+		10000,
+		100000,
+		10000,
+		100000,
+		10000,
+		300000,
+		10000,
+	}
+
+	for i, n := range sizes {
+		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10))
+	}
+
+	for r := 0; r < 3; r++ {
+		h.reopenDB()
+
+		var x uint64
+		for i, n := range sizes {
+			y := x
+			if i > 0 {
+				y += 1000
+			}
+			h.sizeAssert("", numKey(i), x, y)
+			x += n
+		}
+
+		h.sizeAssert(numKey(3), numKey(5), 110000, 111000)
+
+		h.compactRangeAt(0, "", "")
+	}
+}
+
+func TestDB_Snapshot(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		h.put("foo", "v1")
+		s1 := h.getSnapshot()
+		h.put("foo", "v2")
+		s2 := h.getSnapshot()
+		h.put("foo", "v3")
+		s3 := h.getSnapshot()
+		h.put("foo", "v4")
+
+		h.getValr(s1, "foo", "v1")
+		h.getValr(s2, "foo", "v2")
+		h.getValr(s3, "foo", "v3")
+		h.getVal("foo", "v4")
+
+		s3.Release()
+		h.getValr(s1, "foo", "v1")
+		h.getValr(s2, "foo", "v2")
+		h.getVal("foo", "v4")
+
+		s1.Release()
+		h.getValr(s2, "foo", "v2")
+		h.getVal("foo", "v4")
+
+		s2.Release()
+		h.getVal("foo", "v4")
+	})
+}
+
+func TestDB_SnapshotList(t *testing.T) {
+	db := &DB{snapsList: list.New()}
+	e0a := db.acquireSnapshot()
+	e0b := db.acquireSnapshot()
+	db.seq = 1
+	e1 := db.acquireSnapshot()
+	db.seq = 2
+	e2 := db.acquireSnapshot()
+
+	if db.minSeq() != 0 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e0a)
+	if db.minSeq() != 0 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e2)
+	if db.minSeq() != 0 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e0b)
+	if db.minSeq() != 1 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	e2 = db.acquireSnapshot()
+	if db.minSeq() != 1 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e1)
+	if db.minSeq() != 2 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e2)
+	if db.minSeq() != 2 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+}
+
+// TestDB_HiddenValuesAreRemoved checks that a deletion marker can be dropped
+// during compaction once a newer value (v2) hides the older one (v1), while
+// v1 itself survives until its own level is compacted.
+func TestDB_HiddenValuesAreRemoved(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		s := h.db.s
+
+		h.put("foo", "v1")
+		h.compactMem()
+		m := h.o.GetMaxMemCompationLevel()
+		v := s.version()
+		num := v.tLen(m)
+		v.release()
+		if num != 1 {
+			t.Errorf("invalid level-%d len, want=1 got=%d", m, num)
+		}
+
+		// Place a table at level last-1 to prevent merging with preceding mutation
+		h.put("a", "begin")
+		h.put("z", "end")
+		h.compactMem()
+		v = s.version()
+		if v.tLen(m) != 1 {
+			t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m))
+		}
+		if v.tLen(m-1) != 1 {
+			t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1))
+		}
+		v.release()
+
+		h.delete("foo")
+		h.put("foo", "v2")
+		h.allEntriesFor("foo", "[ v2, DEL, v1 ]")
+		h.compactMem()
+		h.allEntriesFor("foo", "[ v2, DEL, v1 ]")
+		h.compactRangeAt(m-2, "", "z")
+		// DEL eliminated, but v1 remains because we aren't compacting that level
+		// (DEL can be eliminated because v2 hides v1).
+		h.allEntriesFor("foo", "[ v2, v1 ]")
+		h.compactRangeAt(m-1, "", "")
+		// Merging last-1 w/ last, so we are the base level for "foo", so
+		// DEL is removed.  (as is v1).
+		h.allEntriesFor("foo", "[ v2 ]")
+	})
+}
+
+// TestDB_DeletionMarkers2 is the companion of TestDB_HiddenValuesAreRemoved:
+// here no newer value hides v1, so the DEL marker must be *kept* as long as a
+// deeper level may still contain "foo", and both DEL and v1 disappear only
+// when the deletion reaches the base level for the key.
+func TestDB_DeletionMarkers2(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+	s := h.db.s
+
+	h.put("foo", "v1")
+	h.compactMem()
+	m := h.o.GetMaxMemCompationLevel()
+	v := s.version()
+	num := v.tLen(m)
+	v.release()
+	if num != 1 {
+		t.Errorf("invalid level-%d len, want=1 got=%d", m, num)
+	}
+
+	// Place a table at level last-1 to prevent merging with preceding mutation
+	h.put("a", "begin")
+	h.put("z", "end")
+	h.compactMem()
+	v = s.version()
+	if v.tLen(m) != 1 {
+		t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m))
+	}
+	if v.tLen(m-1) != 1 {
+		t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1))
+	}
+	v.release()
+
+	h.delete("foo")
+	h.allEntriesFor("foo", "[ DEL, v1 ]")
+	h.compactMem() // Moves to level last-2
+	h.allEntriesFor("foo", "[ DEL, v1 ]")
+	h.compactRangeAt(m-2, "", "")
+	// DEL kept: "last" file overlaps
+	h.allEntriesFor("foo", "[ DEL, v1 ]")
+	h.compactRangeAt(m-1, "", "")
+	// Merging last-1 w/ last, so we are the base level for "foo", so
+	// DEL is removed.  (as is v1).
+	h.allEntriesFor("foo", "[ ]")
+}
+
+// TestDB_CompactionTableOpenError injects table-open failures during a manual
+// compaction and verifies that no data is lost: after clearing the emulated
+// error and reopening, every key written before the failure is still readable.
+// OpenFilesCacheCapacity: -1 disables the table-file cache so every table
+// access goes through an Open call that the emulated error can intercept.
+func TestDB_CompactionTableOpenError(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1})
+	defer h.close()
+
+	im := 10
+	jm := 10
+	// Two rounds of im memtable compactions, jm keys each => im*2 tables.
+	for r := 0; r < 2; r++ {
+		for i := 0; i < im; i++ {
+			for j := 0; j < jm; j++ {
+				h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
+			}
+			h.compactMem()
+		}
+	}
+
+	// BUGFIX: the condition checks against im*2, so the failure message must
+	// report im*2 as the wanted value (it previously printed im).
+	if n := h.totalTables(); n != im*2 {
+		t.Errorf("total tables is %d, want %d", n, im*2)
+	}
+
+	// Make table opens fail, then trigger a compaction and wait for the
+	// compaction goroutine to go idle (logging any expected error).
+	h.stor.SetEmuErr(storage.TypeTable, tsOpOpen)
+	go h.db.CompactRange(util.Range{})
+	if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil {
+		t.Log("compaction error: ", err)
+	}
+	h.closeDB0()
+	h.openDB()
+	h.stor.SetEmuErr(0, tsOpOpen)
+
+	// All keys must have survived the failed compaction.
+	for i := 0; i < im; i++ {
+		for j := 0; j < jm; j++ {
+			h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
+		}
+	}
+}
+
+// TestDB_OverlapInLevel0 reproduces a historical bug where a memtable
+// compaction failed to detect overlap with existing level-0 files and pushed
+// a deletion marker to a deeper level, resurrecting the deleted key.
+func TestDB_OverlapInLevel0(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		if h.o.GetMaxMemCompationLevel() != 2 {
+			t.Fatal("fix test to reflect the config")
+		}
+
+		// Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
+		h.put("100", "v100")
+		h.put("999", "v999")
+		h.compactMem()
+		h.delete("100")
+		h.delete("999")
+		h.compactMem()
+		h.tablesPerLevel("0,1,1")
+
+		// Make files spanning the following ranges in level-0:
+		//  files[0]  200 .. 900
+		//  files[1]  300 .. 500
+		// Note that files are sorted by min key.
+		h.put("300", "v300")
+		h.put("500", "v500")
+		h.compactMem()
+		h.put("200", "v200")
+		h.put("600", "v600")
+		h.put("900", "v900")
+		h.compactMem()
+		h.tablesPerLevel("2,1,1")
+
+		// Compact away the placeholder files we created initially
+		h.compactRangeAt(1, "", "")
+		h.compactRangeAt(2, "", "")
+		h.tablesPerLevel("2")
+
+		// Do a memtable compaction.  Before bug-fix, the compaction would
+		// not detect the overlap with level-0 files and would incorrectly place
+		// the deletion in a deeper level.
+		h.delete("600")
+		h.compactMem()
+		h.tablesPerLevel("3")
+		h.get("600", false)
+	})
+}
+
+// TestDB_L0_CompactionBug_Issue44_a replays the exact put/delete/reopen
+// sequence from LevelDB issue 44 (part a): after repeated reopens, key "a"
+// must still resolve to "v" both before and after a background compaction.
+func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.reopenDB()
+	h.put("b", "v")
+	h.reopenDB()
+	h.delete("b")
+	h.delete("a")
+	h.reopenDB()
+	h.delete("a")
+	h.reopenDB()
+	h.put("a", "v")
+	h.reopenDB()
+	h.reopenDB()
+	h.getKeyVal("(a->v)")
+	h.waitCompaction()
+	// The visible state must not change after compaction settles.
+	h.getKeyVal("(a->v)")
+}
+
+// TestDB_L0_CompactionBug_Issue44_b replays the second sequence from LevelDB
+// issue 44 (part b), mixing empty-key writes, deletes and reopens; the final
+// key set must be exactly the empty key plus c->cv, before and after a
+// background compaction.
+func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.reopenDB()
+	h.put("", "")
+	h.reopenDB()
+	h.delete("e")
+	h.put("", "")
+	h.reopenDB()
+	h.put("c", "cv")
+	h.reopenDB()
+	h.put("", "")
+	h.reopenDB()
+	h.put("", "")
+	h.waitCompaction()
+	h.reopenDB()
+	h.put("d", "dv")
+	h.reopenDB()
+	h.put("", "")
+	h.reopenDB()
+	h.delete("d")
+	h.delete("b")
+	h.reopenDB()
+	h.getKeyVal("(->)(c->cv)")
+	h.waitCompaction()
+	h.getKeyVal("(->)(c->cv)")
+}
+
+// TestDB_SingleEntryMemCompaction stresses memtable compaction with extreme
+// single-entry sizes: values at, above and far below the write-buffer size,
+// plus empty key/value, repeated over several iterations.
+func TestDB_SingleEntryMemCompaction(t *testing.T) {
+	trun(t, func(h *dbHarness) {
+		for i := 0; i < 10; i++ {
+			// Value exactly filling the write buffer.
+			h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer))
+			h.compactMem()
+			h.put("key", strings.Repeat("v", opt.DefaultBlockSize))
+			h.compactMem()
+			h.put("k", "v")
+			h.compactMem()
+			h.put("", "")
+			h.compactMem()
+			// Value twice the write buffer, forcing oversized handling.
+			h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2))
+			h.compactMem()
+		}
+	})
+}
+
+// TestDB_ManifestWriteError verifies that a failed manifest write (iteration
+// 0) or manifest sync (iteration 1) during a merging compaction does not lose
+// data: after clearing the error and reopening, the key is still readable.
+func TestDB_ManifestWriteError(t *testing.T) {
+	for i := 0; i < 2; i++ {
+		func() {
+			h := newDbHarness(t)
+			defer h.close()
+
+			h.put("foo", "bar")
+			h.getVal("foo", "bar")
+
+			// Mem compaction (will succeed)
+			h.compactMem()
+			h.getVal("foo", "bar")
+			v := h.db.s.version()
+			if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 {
+				t.Errorf("invalid total tables, want=1 got=%d", n)
+			}
+			v.release()
+
+			// Inject the manifest failure: write error first round, sync
+			// error second round.
+			if i == 0 {
+				h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite)
+			} else {
+				h.stor.SetEmuErr(storage.TypeManifest, tsOpSync)
+			}
+
+			// Merging compaction (will fail)
+			h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true)
+
+			h.db.Close()
+			h.stor.SetEmuErr(0, tsOpWrite)
+			h.stor.SetEmuErr(0, tsOpSync)
+
+			// Should not lose data
+			h.openDB()
+			h.getVal("foo", "bar")
+		}()
+	}
+}
+
+// assertErr checks an error value against an expectation: when wanterr is
+// true a non-nil err is logged as expected (and a nil err is a test failure);
+// when wanterr is false any non-nil err fails the test.
+func assertErr(t *testing.T, err error, wanterr bool) {
+	if err == nil {
+		if wanterr {
+			t.Error("AssertErr: expect error")
+		}
+		return
+	}
+	if !wanterr {
+		t.Error("AssertErr: got error: ", err)
+		return
+	}
+	t.Log("AssertErr: got error (expected): ", err)
+}
+
+// TestDB_ClosedIsClosed verifies that every public entry point fails cleanly
+// after the DB is closed: Get/Put/Delete, snapshots, iterators (existing and
+// newly created), GetProperty, SizeOf, CompactRange, and a second Close.
+func TestDB_ClosedIsClosed(t *testing.T) {
+	h := newDbHarness(t)
+	db := h.db
+
+	// iter/iter2/snap are created while the DB is open, then probed after
+	// the closure below has closed it.
+	var iter, iter2 iterator.Iterator
+	var snap *Snapshot
+	func() {
+		defer h.close()
+
+		h.put("k", "v")
+		h.getVal("k", "v")
+
+		iter = db.NewIterator(nil, h.ro)
+		iter.Seek([]byte("k"))
+		testKeyVal(t, iter, "k->v")
+
+		var err error
+		snap, err = db.GetSnapshot()
+		if err != nil {
+			t.Fatal("GetSnapshot: got error: ", err)
+		}
+
+		h.getValr(snap, "k", "v")
+
+		iter2 = snap.NewIterator(nil, h.ro)
+		iter2.Seek([]byte("k"))
+		testKeyVal(t, iter2, "k->v")
+
+		h.put("foo", "v2")
+		h.delete("foo")
+
+		// closing DB
+		iter.Release()
+		iter2.Release()
+	}()
+
+	assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true)
+	_, err := db.Get([]byte("k"), h.ro)
+	assertErr(t, err, true)
+
+	// A released iterator is invalid but reports no error until it is used.
+	if iter.Valid() {
+		t.Errorf("iter.Valid should false")
+	}
+	assertErr(t, iter.Error(), false)
+	testKeyVal(t, iter, "->")
+	if iter.Seek([]byte("k")) {
+		t.Errorf("iter.Seek should false")
+	}
+	assertErr(t, iter.Error(), true)
+
+	assertErr(t, iter2.Error(), false)
+
+	_, err = snap.Get([]byte("k"), h.ro)
+	assertErr(t, err, true)
+
+	_, err = db.GetSnapshot()
+	assertErr(t, err, true)
+
+	iter3 := db.NewIterator(nil, h.ro)
+	assertErr(t, iter3.Error(), true)
+
+	iter3 = snap.NewIterator(nil, h.ro)
+	assertErr(t, iter3.Error(), true)
+
+	assertErr(t, db.Delete([]byte("k"), h.wo), true)
+
+	_, err = db.GetProperty("leveldb.stats")
+	assertErr(t, err, true)
+
+	_, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}})
+	assertErr(t, err, true)
+
+	assertErr(t, db.CompactRange(util.Range{}), true)
+
+	assertErr(t, db.Close(), true)
+}
+
+// numberComparer is a test comparer that orders keys of the form "[N]" by the
+// numeric value of N rather than lexicographically.
+type numberComparer struct{}
+
+// num parses the integer between the surrounding brackets of x, e.g.
+// "[10]" -> 10. fmt.Sscan also accepts base-prefixed forms such as "[0x14]".
+func (numberComparer) num(x []byte) (n int) {
+	fmt.Sscan(string(x[1:len(x)-1]), &n)
+	return
+}
+
+func (numberComparer) Name() string {
+	return "test.NumberComparer"
+}
+
+// Compare orders by parsed numeric value; the difference's sign carries the
+// ordering.
+func (p numberComparer) Compare(a, b []byte) int {
+	return p.num(a) - p.num(b)
+}
+
+// Separator and Successor return nil, which tells the table builder to use
+// the keys unchanged (no key shortening).
+func (numberComparer) Separator(dst, a, b []byte) []byte { return nil }
+func (numberComparer) Successor(dst, b []byte) []byte    { return nil }
+
+// TestDB_CustomComparer runs the DB with numberComparer so that numerically
+// equal keys written in different notations ("[10]" vs "[0xa]") collapse to
+// the same entry, across memtable and table compactions.
+func TestDB_CustomComparer(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		Comparer:    numberComparer{},
+		WriteBuffer: 1000,
+	})
+	defer h.close()
+
+	h.put("[10]", "ten")
+	h.put("[0x14]", "twenty")
+	// Two passes: first against the memtable, then (after compaction)
+	// against the tables.
+	for i := 0; i < 2; i++ {
+		h.getVal("[10]", "ten")
+		h.getVal("[0xa]", "ten")
+		h.getVal("[20]", "twenty")
+		h.getVal("[0x14]", "twenty")
+		h.get("[15]", false)
+		h.get("[0xf]", false)
+		h.compactMem()
+		h.compactRange("[0]", "[9999]")
+	}
+
+	// Bulk writes to exercise the comparer during larger compactions.
+	for n := 0; n < 2; n++ {
+		for i := 0; i < 100; i++ {
+			v := fmt.Sprintf("[%d]", i*10)
+			h.put(v, v)
+		}
+		h.compactMem()
+		h.compactRange("[0]", "[1000000]")
+	}
+}
+
+// TestDB_ManualCompaction checks CompactRange with ranges that fall before,
+// after, and over the stored keys, asserting the resulting per-level table
+// counts after each call.
+func TestDB_ManualCompaction(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	if h.o.GetMaxMemCompationLevel() != 2 {
+		t.Fatal("fix test to reflect the config")
+	}
+
+	h.putMulti(3, "p", "q")
+	h.tablesPerLevel("1,1,1")
+
+	// Compaction range falls before files
+	h.compactRange("", "c")
+	h.tablesPerLevel("1,1,1")
+
+	// Compaction range falls after files
+	h.compactRange("r", "z")
+	h.tablesPerLevel("1,1,1")
+
+	// Compaction range overlaps files
+	h.compactRange("p1", "p9")
+	h.tablesPerLevel("0,0,1")
+
+	// Populate a different range
+	h.putMulti(3, "c", "e")
+	h.tablesPerLevel("1,1,2")
+
+	// Compact just the new range
+	h.compactRange("b", "f")
+	h.tablesPerLevel("0,0,2")
+
+	// Compact all
+	h.putMulti(1, "a", "z")
+	h.tablesPerLevel("0,1,2")
+	h.compactRange("", "")
+	h.tablesPerLevel("0,0,1")
+}
+
+// TestDB_BloomFilter measures sstable read I/O with a 10-bit bloom filter
+// enabled: present-key lookups should cost roughly one table read each, and
+// missing-key lookups should almost always be filtered out (<= 3% reads).
+func TestDB_BloomFilter(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		DisableBlockCache: true,
+		Filter:            filter.NewBloomFilter(10),
+	})
+	defer h.close()
+
+	key := func(i int) string {
+		return fmt.Sprintf("key%06d", i)
+	}
+
+	const n = 10000
+
+	// Populate multiple layers
+	for i := 0; i < n; i++ {
+		h.put(key(i), key(i))
+	}
+	h.compactMem()
+	h.compactRange("a", "z")
+	for i := 0; i < n; i += 100 {
+		h.put(key(i), key(i))
+	}
+	h.compactMem()
+
+	// Prevent auto compactions triggered by seeks
+	h.stor.DelaySync(storage.TypeTable)
+
+	// Lookup present keys. Should rarely read from small sstable.
+	h.stor.SetReadCounter(storage.TypeTable)
+	for i := 0; i < n; i++ {
+		h.getVal(key(i), key(i))
+	}
+	cnt := int(h.stor.ReadCounter())
+	t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
+
+	// Allow up to 2% extra reads caused by bloom false positives.
+	if min, max := n, n+2*n/100; cnt < min || cnt > max {
+		t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
+	}
+
+	// Lookup missing keys. Should rarely read from either sstable.
+	h.stor.ResetReadCounter()
+	for i := 0; i < n; i++ {
+		h.get(key(i)+".missing", false)
+	}
+	cnt = int(h.stor.ReadCounter())
+	t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
+	if max := 3 * n / 100; cnt > max {
+		t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
+	}
+
+	h.stor.ReleaseSync(storage.TypeTable)
+}
+
+// TestDB_Concurrent runs n goroutines for a fixed duration, each randomly
+// putting or getting keys. Values encode key, writer id and the writer's op
+// counter, so readers can cross-check that any value seen was produced by a
+// legal (not future) write of the claimed goroutine.
+func TestDB_Concurrent(t *testing.T) {
+	const n, secs, maxkey = 4, 2, 1000
+
+	runtime.GOMAXPROCS(n)
+	trun(t, func(h *dbHarness) {
+		var closeWg sync.WaitGroup
+		var stop uint32
+		// cnt[i] is goroutine i's op counter, read/written atomically.
+		var cnt [n]uint32
+
+		for i := 0; i < n; i++ {
+			closeWg.Add(1)
+			go func(i int) {
+				var put, get, found uint
+				defer func() {
+					t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d",
+						i, cnt[i], put, get, found, get-found)
+					closeWg.Done()
+				}()
+
+				rnd := rand.New(rand.NewSource(int64(1000 + i)))
+				for atomic.LoadUint32(&stop) == 0 {
+					x := cnt[i]
+
+					k := rnd.Intn(maxkey)
+					kstr := fmt.Sprintf("%016d", k)
+
+					// Coin flip between write and read.
+					if (rnd.Int() % 2) > 0 {
+						put++
+						h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x))
+					} else {
+						get++
+						v, err := h.db.Get([]byte(kstr), h.ro)
+						if err == nil {
+							found++
+							rk, ri, rx := 0, -1, uint32(0)
+							fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx)
+							if rk != k {
+								t.Errorf("invalid key want=%d got=%d", k, rk)
+							}
+							if ri < 0 || ri >= n {
+								t.Error("invalid goroutine number: ", ri)
+							} else {
+								// The stored counter may not exceed the
+								// writer's current counter.
+								tx := atomic.LoadUint32(&(cnt[ri]))
+								if rx > tx {
+									t.Errorf("invalid seq number, %d > %d ", rx, tx)
+								}
+							}
+						} else if err != ErrNotFound {
+							t.Error("Get: got error: ", err)
+							return
+						}
+					}
+					atomic.AddUint32(&cnt[i], 1)
+				}
+			}(i)
+		}
+
+		time.Sleep(secs * time.Second)
+		atomic.StoreUint32(&stop, 1)
+		closeWg.Wait()
+	})
+
+	runtime.GOMAXPROCS(1)
+}
+
+// TestDB_Concurrent2 runs ascending and descending writer goroutines against
+// a tiny write buffer while n2 iterator goroutines each walk the whole key
+// space, checking strict key ordering and that values are consistent with
+// their keys.
+func TestDB_Concurrent2(t *testing.T) {
+	const n, n2 = 4, 4000
+
+	runtime.GOMAXPROCS(n*2 + 2)
+	truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) {
+		var closeWg sync.WaitGroup
+		var stop uint32
+
+		// Writers counting upward from 0.
+		for i := 0; i < n; i++ {
+			closeWg.Add(1)
+			go func(i int) {
+				for k := 0; atomic.LoadUint32(&stop) == 0; k++ {
+					h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
+				}
+				closeWg.Done()
+			}(i)
+		}
+
+		// Writers counting downward from 1000000; the k < 0 clause never
+		// holds here, so the stop flag alone terminates the loop.
+		for i := 0; i < n; i++ {
+			closeWg.Add(1)
+			go func(i int) {
+				for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- {
+					h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
+				}
+				closeWg.Done()
+			}(i)
+		}
+
+		cmp := comparer.DefaultComparer
+		for i := 0; i < n2; i++ {
+			closeWg.Add(1)
+			go func(i int) {
+				it := h.db.NewIterator(nil, nil)
+				// pk tracks the previous key to assert strictly ascending order.
+				var pk []byte
+				for it.Next() {
+					kk := it.Key()
+					if cmp.Compare(kk, pk) <= 0 {
+						t.Errorf("iter %d: %q is successor of %q", i, pk, kk)
+					}
+					pk = append(pk[:0], kk...)
+					var k, vk, vi int
+					if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil {
+						t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err)
+					} else if n < 1 {
+						t.Errorf("iter %d: Cannot parse key %q", i, it.Key())
+					}
+					if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil {
+						t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err)
+					} else if n < 2 {
+						t.Errorf("iter %d: Cannot parse value %q", i, it.Value())
+					}
+
+					if vk != k {
+						t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk)
+					}
+				}
+				if err := it.Error(); err != nil {
+					t.Errorf("iter %d: Got error: %v", i, err)
+				}
+				it.Release()
+				closeWg.Done()
+			}(i)
+		}
+
+		atomic.StoreUint32(&stop, 1)
+		closeWg.Wait()
+	})
+
+	runtime.GOMAXPROCS(1)
+}
+
+// TestDB_CreateReopenDbOnFile opens the same on-disk database three times in
+// a row via an explicit storage.OpenFile, writing a key each time, to check
+// that create/reopen/close cycles work on real file storage.
+func TestDB_CreateReopenDbOnFile(t *testing.T) {
+	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid()))
+	if err := os.RemoveAll(dbpath); err != nil {
+		t.Fatal("cannot remove old db: ", err)
+	}
+	defer os.RemoveAll(dbpath)
+
+	for i := 0; i < 3; i++ {
+		stor, err := storage.OpenFile(dbpath)
+		if err != nil {
+			t.Fatalf("(%d) cannot open storage: %s", i, err)
+		}
+		db, err := Open(stor, nil)
+		if err != nil {
+			t.Fatalf("(%d) cannot open db: %s", i, err)
+		}
+		if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
+			t.Fatalf("(%d) cannot write to db: %s", i, err)
+		}
+		if err := db.Close(); err != nil {
+			t.Fatalf("(%d) cannot close db: %s", i, err)
+		}
+		if err := stor.Close(); err != nil {
+			t.Fatalf("(%d) cannot close storage: %s", i, err)
+		}
+	}
+}
+
+// TestDB_CreateReopenDbOnFile2 is the convenience-API variant of the previous
+// test: three create/write/close cycles through OpenFile, which manages the
+// underlying storage itself.
+func TestDB_CreateReopenDbOnFile2(t *testing.T) {
+	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid()))
+	if err := os.RemoveAll(dbpath); err != nil {
+		t.Fatal("cannot remove old db: ", err)
+	}
+	defer os.RemoveAll(dbpath)
+
+	for i := 0; i < 3; i++ {
+		db, err := OpenFile(dbpath, nil)
+		if err != nil {
+			t.Fatalf("(%d) cannot open db: %s", i, err)
+		}
+		if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
+			t.Fatalf("(%d) cannot write to db: %s", i, err)
+		}
+		if err := db.Close(); err != nil {
+			t.Fatalf("(%d) cannot close db: %s", i, err)
+		}
+	}
+}
+
+// TestDB_DeletionMarkersOnMemdb checks that a deletion marker still held in
+// the memtable masks an older value already compacted into a table: the key
+// must be absent and the full key/value scan must be empty.
+func TestDB_DeletionMarkersOnMemdb(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.put("foo", "v1")
+	h.compactMem()
+	h.delete("foo")
+	h.get("foo", false)
+	h.getKeyVal("")
+}
+
+// TestDB_LeveldbIssue178 reproduces LevelDB issue 178: after writing a second
+// key range and then deleting it, a manual compaction over the first range
+// must not resurrect or drop keys — exactly nKeys keys remain.
+func TestDB_LeveldbIssue178(t *testing.T) {
+	nKeys := (opt.DefaultCompactionTableSize / 30) * 5
+	key1 := func(i int) string {
+		return fmt.Sprintf("my_key_%d", i)
+	}
+	key2 := func(i int) string {
+		return fmt.Sprintf("my_key_%d_xxx", i)
+	}
+
+	// Disable compression since it affects the creation of layers and the
+	// code below is trying to test against a very specific scenario.
+	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
+	defer h.close()
+
+	// Create first key range.
+	batch := new(Batch)
+	for i := 0; i < nKeys; i++ {
+		batch.Put([]byte(key1(i)), []byte("value for range 1 key"))
+	}
+	h.write(batch)
+
+	// Create second key range.
+	batch.Reset()
+	for i := 0; i < nKeys; i++ {
+		batch.Put([]byte(key2(i)), []byte("value for range 2 key"))
+	}
+	h.write(batch)
+
+	// Delete second key range.
+	batch.Reset()
+	for i := 0; i < nKeys; i++ {
+		batch.Delete([]byte(key2(i)))
+	}
+	h.write(batch)
+	h.waitMemCompaction()
+
+	// Run manual compaction.
+	h.compactRange(key1(0), key1(nKeys-1))
+
+	// Checking the keys.
+	h.assertNumKeys(nKeys)
+}
+
+// TestDB_LeveldbIssue200 reproduces LevelDB issue 200: a write performed
+// after an iterator is created must not become visible to that iterator, and
+// Prev/Next navigation around the inserted position must stay consistent.
+func TestDB_LeveldbIssue200(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.put("1", "b")
+	h.put("2", "c")
+	h.put("3", "d")
+	h.put("4", "e")
+	h.put("5", "f")
+
+	iter := h.db.NewIterator(nil, h.ro)
+
+	// Add an element that should not be reflected in the iterator.
+	h.put("25", "cd")
+
+	// Walk back and forth across where "25" would sort; it must not appear.
+	iter.Seek([]byte("5"))
+	assertBytes(t, []byte("5"), iter.Key())
+	iter.Prev()
+	assertBytes(t, []byte("4"), iter.Key())
+	iter.Prev()
+	assertBytes(t, []byte("3"), iter.Key())
+	iter.Next()
+	assertBytes(t, []byte("4"), iter.Key())
+	iter.Next()
+	assertBytes(t, []byte("5"), iter.Key())
+}
+
+// TestDB_GoleveldbIssue74 reproduces goleveldb issue 74: one writer goroutine
+// repeatedly writes n KEY/PTR pairs and then deletes them through a snapshot
+// iterator, while a reader goroutine validates PTR->KEY->value consistency
+// through its own snapshots. Either goroutine setting `done` stops both.
+func TestDB_GoleveldbIssue74(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		WriteBuffer: 1 * opt.MiB,
+	})
+	defer h.close()
+
+	const n, dur = 10000, 5 * time.Second
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	until := time.Now().Add(dur)
+	wg := new(sync.WaitGroup)
+	wg.Add(2)
+	var done uint32
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("WRITER DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+		}()
+
+		b := new(Batch)
+		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+			// Write KEY%06d -> KEY+iv and PTR%06d -> KEY for all n keys.
+			iv := fmt.Sprintf("VAL%010d", i)
+			for k := 0; k < n; k++ {
+				key := fmt.Sprintf("KEY%06d", k)
+				b.Put([]byte(key), []byte(key+iv))
+				b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key))
+			}
+			h.write(b)
+
+			// Then walk the PTR range through a snapshot, verify each
+			// pointer target, and batch-delete everything just written.
+			b.Reset()
+			snap := h.getSnapshot()
+			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
+			var k int
+			for ; iter.Next(); k++ {
+				ptrKey := iter.Key()
+				key := iter.Value()
+
+				if _, err := snap.Get(ptrKey, nil); err != nil {
+					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err)
+				}
+				if value, err := snap.Get(key, nil); err != nil {
+					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err)
+				} else if string(value) != string(key)+iv {
+					t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value)
+				}
+
+				b.Delete(key)
+				b.Delete(ptrKey)
+			}
+			h.write(b)
+			iter.Release()
+			snap.Release()
+			if k != n {
+				t.Fatalf("#%d %d != %d", i, k, n)
+			}
+		}
+	}()
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("READER DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+		}()
+		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+			snap := h.getSnapshot()
+			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
+			// All values within one snapshot must share the same iv suffix.
+			var prevValue string
+			var k int
+			for ; iter.Next(); k++ {
+				ptrKey := iter.Key()
+				key := iter.Value()
+
+				if _, err := snap.Get(ptrKey, nil); err != nil {
+					t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err)
+				}
+
+				if value, err := snap.Get(key, nil); err != nil {
+					t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err)
+				} else if prevValue != "" && string(value) != string(key)+prevValue {
+					t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value)
+				} else {
+					prevValue = string(value[len(key):])
+				}
+			}
+			iter.Release()
+			snap.Release()
+			// A snapshot sees either all n keys or none (mid-delete).
+			if k > 0 && k != n {
+				t.Fatalf("#%d %d != %d", i, k, n)
+			}
+		}
+	}()
+	wg.Wait()
+}
+
+// TestDB_GetProperties validates parsing of the "leveldb.num-files-at-levelN"
+// property name: a missing level number and a trailing non-digit must both be
+// rejected, while a well-formed level succeeds.
+func TestDB_GetProperties(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	_, err := h.db.GetProperty("leveldb.num-files-at-level")
+	if err == nil {
+		t.Error("GetProperty() failed to detect missing level")
+	}
+
+	_, err = h.db.GetProperty("leveldb.num-files-at-level0")
+	if err != nil {
+		t.Error("got unexpected error", err)
+	}
+
+	_, err = h.db.GetProperty("leveldb.num-files-at-level0x")
+	if err == nil {
+		t.Error("GetProperty() failed to detect invalid level")
+	}
+}
+
+// TestDB_GoleveldbIssue72and83 reproduces goleveldb issues 72 and 83: a
+// writer keeps rewriting n checksummed k1->k2->value records while two
+// readers validate them — one forward through snapshots (checksums, write
+// iteration number, k2 resolvability), one backward counting entries through
+// a raw DB iterator. Any goroutine setting `done` stops the others.
+func TestDB_GoleveldbIssue72and83(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		WriteBuffer:            1 * opt.MiB,
+		OpenFilesCacheCapacity: 3,
+	})
+	defer h.close()
+
+	const n, wn, dur = 10000, 100, 30 * time.Second
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	// randomData builds prefix|random|iteration|CRC records so readers can
+	// detect corruption (bad CRC) and staleness (wrong iteration number).
+	randomData := func(prefix byte, i int) []byte {
+		data := make([]byte, 1+4+32+64+32)
+		_, err := crand.Reader.Read(data[1 : len(data)-8])
+		if err != nil {
+			panic(err)
+		}
+		data[0] = prefix
+		binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i))
+		binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value())
+		return data
+	}
+
+	keys := make([][]byte, n)
+	for i := range keys {
+		keys[i] = randomData(1, 0)
+	}
+
+	until := time.Now().Add(dur)
+	wg := new(sync.WaitGroup)
+	wg.Add(3)
+	var done uint32
+	go func() {
+		i := 0
+		defer func() {
+			t.Logf("WRITER DONE #%d", i)
+			wg.Done()
+		}()
+
+		b := new(Batch)
+		for ; i < wn && atomic.LoadUint32(&done) == 0; i++ {
+			b.Reset()
+			for _, k1 := range keys {
+				k2 := randomData(2, i)
+				b.Put(k2, randomData(42, i))
+				b.Put(k1, k2)
+			}
+			if err := h.db.Write(b, h.wo); err != nil {
+				atomic.StoreUint32(&done, 1)
+				t.Fatalf("WRITER #%d db.Write: %v", i, err)
+			}
+		}
+	}()
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("READER0 DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+		}()
+		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+			snap := h.getSnapshot()
+			seq := snap.elem.seq
+			if seq == 0 {
+				snap.Release()
+				continue
+			}
+			iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
+			// Each write iteration advances seq by 2n, so the iteration
+			// number this snapshot should observe is seq/(2n) - 1.
+			writei := int(seq/(n*2) - 1)
+			var k int
+			for ; iter.Next(); k++ {
+				k1 := iter.Key()
+				k2 := iter.Value()
+				k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:])
+				k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value()
+				if k1checksum0 != k1checksum1 {
+					// BUGFIX: the format string has five verbs; pass writei
+					// and both checksum values (previously k1checksum0 was
+					// passed twice and writei was missing — a vet error).
+					t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, writei, k1checksum0, k1checksum1)
+				}
+				k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:])
+				k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value()
+				if k2checksum0 != k2checksum1 {
+					// BUGFIX: writei was missing from the argument list.
+					t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, writei, k2checksum0, k2checksum1)
+				}
+				kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:]))
+				if writei != kwritei {
+					t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei)
+				}
+				if _, err := snap.Get(k2, nil); err != nil {
+					t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2)
+				}
+			}
+			if err := iter.Error(); err != nil {
+				t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err)
+			}
+			iter.Release()
+			snap.Release()
+			if k > 0 && k != n {
+				t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n)
+			}
+		}
+	}()
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("READER1 DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+		}()
+		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+			iter := h.db.NewIterator(nil, nil)
+			seq := iter.(*dbIter).seq
+			if seq == 0 {
+				iter.Release()
+				continue
+			}
+			writei := int(seq/(n*2) - 1)
+			// Count all entries walking backward from the end.
+			var k int
+			for ok := iter.Last(); ok; ok = iter.Prev() {
+				k++
+			}
+			if err := iter.Error(); err != nil {
+				t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err)
+			}
+			iter.Release()
+			if m := (writei+1)*n + n; k != m {
+				t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m)
+			}
+		}
+	}()
+
+	wg.Wait()
+}
+
+// TestDB_TransientError injects random table I/O errors while building nSnap
+// generations of nKey keys (each generation snapshotted, then deleted), and
+// then — with errors cleared — verifies concurrently via Get and via iterator
+// that every snapshot still yields its exact generation of values.
+func TestDB_TransientError(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		WriteBuffer:              128 * opt.KiB,
+		OpenFilesCacheCapacity:   3,
+		DisableCompactionBackoff: true,
+	})
+	defer h.close()
+
+	const (
+		nSnap = 20
+		nKey  = 10000
+	)
+
+	var (
+		snaps [nSnap]*Snapshot
+		b     = &Batch{}
+	)
+	for i := range snaps {
+		vtail := fmt.Sprintf("VAL%030d", i)
+		b.Reset()
+		for k := 0; k < nKey; k++ {
+			key := fmt.Sprintf("KEY%8d", k)
+			b.Put([]byte(key), []byte(key+vtail))
+		}
+		// Write under random transient errors; on failure, clear the
+		// errors and retry until the batch lands (corruption is fatal).
+		h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
+		if err := h.db.Write(b, nil); err != nil {
+			t.Logf("WRITE #%d error: %v", i, err)
+			h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite)
+			for {
+				if err := h.db.Write(b, nil); err == nil {
+					break
+				} else if errors.IsCorrupted(err) {
+					t.Fatalf("WRITE #%d corrupted: %v", i, err)
+				}
+			}
+		}
+
+		// Snapshot this generation, then delete all keys (again retrying
+		// under injected errors) before the next generation is written.
+		snaps[i] = h.db.newSnapshot()
+		b.Reset()
+		for k := 0; k < nKey; k++ {
+			key := fmt.Sprintf("KEY%8d", k)
+			b.Delete([]byte(key))
+		}
+		h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
+		if err := h.db.Write(b, nil); err != nil {
+			t.Logf("WRITE #%d  error: %v", i, err)
+			h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
+			for {
+				if err := h.db.Write(b, nil); err == nil {
+					break
+				} else if errors.IsCorrupted(err) {
+					t.Fatalf("WRITE #%d corrupted: %v", i, err)
+				}
+			}
+		}
+	}
+	h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	rnd := rand.New(rand.NewSource(0xecafdaed))
+	wg := &sync.WaitGroup{}
+	for i, snap := range snaps {
+		wg.Add(2)
+
+		// Point-lookup checker: visit the keys in random order.
+		go func(i int, snap *Snapshot, sk []int) {
+			defer wg.Done()
+
+			vtail := fmt.Sprintf("VAL%030d", i)
+			for _, k := range sk {
+				key := fmt.Sprintf("KEY%8d", k)
+				xvalue, err := snap.Get([]byte(key), nil)
+				if err != nil {
+					t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
+				}
+				value := key + vtail
+				if !bytes.Equal([]byte(value), xvalue) {
+					t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
+				}
+			}
+		}(i, snap, rnd.Perm(nKey))
+
+		// Iterator checker: scan in order, expecting exactly nKey entries.
+		go func(i int, snap *Snapshot) {
+			defer wg.Done()
+
+			vtail := fmt.Sprintf("VAL%030d", i)
+			iter := snap.NewIterator(nil, nil)
+			defer iter.Release()
+			for k := 0; k < nKey; k++ {
+				if !iter.Next() {
+					if err := iter.Error(); err != nil {
+						t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err)
+					} else {
+						t.Fatalf("READER_ITER #%d K%d eoi", i, k)
+					}
+				}
+				key := fmt.Sprintf("KEY%8d", k)
+				xkey := iter.Key()
+				if !bytes.Equal([]byte(key), xkey) {
+					t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey)
+				}
+				value := key + vtail
+				xvalue := iter.Value()
+				if !bytes.Equal([]byte(value), xvalue) {
+					t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue)
+				}
+			}
+		}(i, snap)
+	}
+
+	wg.Wait()
+}
+
+// TestDB_UkeyShouldntHopAcrossTable builds many snapshotted generations of
+// the same user keys, forces compactions, and verifies via snapshot reads
+// that no user key's versions were split ("hopped") across adjacent output
+// tables — which would make old versions unreachable.
+func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		WriteBuffer:                 112 * opt.KiB,
+		CompactionTableSize:         90 * opt.KiB,
+		CompactionExpandLimitFactor: 1,
+	})
+	defer h.close()
+
+	const (
+		nSnap = 190
+		nKey  = 140
+	)
+
+	var (
+		snaps [nSnap]*Snapshot
+		b     = &Batch{}
+	)
+	// Write generation i, snapshot it, delete everything; repeat. Each
+	// snapshot pins its own version of every key.
+	for i := range snaps {
+		vtail := fmt.Sprintf("VAL%030d", i)
+		b.Reset()
+		for k := 0; k < nKey; k++ {
+			key := fmt.Sprintf("KEY%08d", k)
+			b.Put([]byte(key), []byte(key+vtail))
+		}
+		if err := h.db.Write(b, nil); err != nil {
+			t.Fatalf("WRITE #%d error: %v", i, err)
+		}
+
+		snaps[i] = h.db.newSnapshot()
+		b.Reset()
+		for k := 0; k < nKey; k++ {
+			key := fmt.Sprintf("KEY%08d", k)
+			b.Delete([]byte(key))
+		}
+		if err := h.db.Write(b, nil); err != nil {
+			t.Fatalf("WRITE #%d  error: %v", i, err)
+		}
+	}
+
+	h.compactMem()
+
+	// Log the table layout after each compaction stage for diagnosis.
+	h.waitCompaction()
+	for level, tables := range h.db.s.stVersion.tables {
+		for _, table := range tables {
+			t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
+		}
+	}
+
+	h.compactRangeAt(0, "", "")
+	h.waitCompaction()
+	for level, tables := range h.db.s.stVersion.tables {
+		for _, table := range tables {
+			t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
+		}
+	}
+	h.compactRangeAt(1, "", "")
+	h.waitCompaction()
+	for level, tables := range h.db.s.stVersion.tables {
+		for _, table := range tables {
+			t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
+		}
+	}
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	// Every snapshot must still read its full generation.
+	wg := &sync.WaitGroup{}
+	for i, snap := range snaps {
+		wg.Add(1)
+
+		go func(i int, snap *Snapshot) {
+			defer wg.Done()
+
+			vtail := fmt.Sprintf("VAL%030d", i)
+			for k := 0; k < nKey; k++ {
+				key := fmt.Sprintf("KEY%08d", k)
+				xvalue, err := snap.Get([]byte(key), nil)
+				if err != nil {
+					t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
+				}
+				value := key + vtail
+				if !bytes.Equal([]byte(value), xvalue) {
+					t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
+				}
+			}
+		}(i, snap)
+	}
+
+	wg.Wait()
+}
+
// TestDB_TableCompactionBuilder drives tableCompactionBuilder directly,
// bypassing the normal compaction scheduler, and verifies the resulting
// table layout across levels — including recovery from injected transient
// storage errors.
func TestDB_TableCompactionBuilder(t *testing.T) {
	stor := newTestStorage(t)
	defer stor.Close()

	// Number of sequence numbers (versions) written per user key.
	const nSeq = 99

	o := &opt.Options{
		WriteBuffer:                 112 * opt.KiB,
		CompactionTableSize:         43 * opt.KiB,
		CompactionExpandLimitFactor: 1,
		CompactionGPOverlapsFactor:  1,
		DisableBlockCache:           true,
	}
	s, err := newSession(stor, o)
	if err != nil {
		t.Fatal(err)
	}
	if err := s.create(); err != nil {
		t.Fatal(err)
	}
	defer s.close()
	var (
		seq        uint64
		targetSize = 5 * o.CompactionTableSize
		value      = bytes.Repeat([]byte{'0'}, 100)
	)
	// Seed one table at level 0 and one at level 1, covering the same key
	// range; every key carries nSeq versions.
	for i := 0; i < 2; i++ {
		tw, err := s.tops.create()
		if err != nil {
			t.Fatal(err)
		}
		for k := 0; tw.tw.BytesLen() < targetSize; k++ {
			key := []byte(fmt.Sprintf("%09d", k))
			seq += nSeq - 1
			for x := uint64(0); x < nSeq; x++ {
				if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil {
					t.Fatal(err)
				}
			}
		}
		tf, err := tw.finish()
		if err != nil {
			t.Fatal(err)
		}
		rec := &sessionRecord{}
		rec.addTableFile(i, tf)
		if err := s.commit(rec); err != nil {
			t.Fatal(err)
		}
	}

	// Build grandparent.
	v := s.version()
	c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
	rec := &sessionRecord{}
	b := &tableCompactionBuilder{
		s:         s,
		c:         c,
		rec:       rec,
		stat1:     new(cStatsStaging),
		minSeq:    0,
		strict:    true,
		tableSize: o.CompactionTableSize/3 + 961,
	}
	if err := b.run(new(compactionTransactCounter)); err != nil {
		t.Fatal(err)
	}
	// Drop the compacted input tables from the version.
	// NOTE(review): the loop variable `t` shadows *testing.T here.
	for _, t := range c.tables[0] {
		rec.delTable(c.level, t.file.Num())
	}
	if err := s.commit(rec); err != nil {
		t.Fatal(err)
	}
	c.release()

	// Build level-1.
	v = s.version()
	c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
	rec = &sessionRecord{}
	b = &tableCompactionBuilder{
		s:         s,
		c:         c,
		rec:       rec,
		stat1:     new(cStatsStaging),
		minSeq:    0,
		strict:    true,
		tableSize: o.CompactionTableSize,
	}
	if err := b.run(new(compactionTransactCounter)); err != nil {
		t.Fatal(err)
	}
	for _, t := range c.tables[0] {
		rec.delTable(c.level, t.file.Num())
	}
	// Move grandparent to level-3
	for _, t := range v.tables[2] {
		rec.delTable(2, t.file.Num())
		rec.addTableFile(3, t)
	}
	if err := s.commit(rec); err != nil {
		t.Fatal(err)
	}
	c.release()

	// Only levels 1 and 3 are expected to hold tables now.
	v = s.version()
	for level, want := range []bool{false, true, false, true, false} {
		got := len(v.tables[level]) > 0
		if want != got {
			t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got)
		}
	}
	// Adjacent level-1 tables must not share a user key at their boundary.
	for i, f := range v.tables[1][:len(v.tables[1])-1] {
		nf := v.tables[1][i+1]
		if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) {
			t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num())
		}
	}
	v.release()

	// Compaction with transient error.
	v = s.version()
	c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
	rec = &sessionRecord{}
	b = &tableCompactionBuilder{
		s:         s,
		c:         c,
		rec:       rec,
		stat1:     new(cStatsStaging),
		minSeq:    0,
		strict:    true,
		tableSize: o.CompactionTableSize,
	}
	// Inject one sync failure plus random read/write failures, then retry
	// the build until it eventually succeeds.
	stor.SetEmuErrOnce(storage.TypeTable, tsOpSync)
	stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite)
	stor.SetEmuRandErrProb(0xf0)
	for {
		if err := b.run(new(compactionTransactCounter)); err != nil {
			t.Logf("(expected) b.run: %v", err)
		} else {
			break
		}
	}
	if err := s.commit(rec); err != nil {
		t.Fatal(err)
	}
	c.release()

	// Disable error injection.
	stor.SetEmuErrOnce(0, tsOpSync)
	stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite)

	// The rebuilt level-2 tables must mirror the level-1 inputs exactly,
	// table by table and key by key.
	v = s.version()
	if len(v.tables[1]) != len(v.tables[2]) {
		t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2]))
	}
	for i, f0 := range v.tables[1] {
		f1 := v.tables[2][i]
		iter0 := s.tops.newIterator(f0, nil, nil)
		iter1 := s.tops.newIterator(f1, nil, nil)
		for j := 0; true; j++ {
			next0 := iter0.Next()
			next1 := iter1.Next()
			if next0 != next1 {
				t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1)
			}
			key0 := iter0.Key()
			key1 := iter1.Key()
			if !bytes.Equal(key0, key1) {
				t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1)
			}
			if next0 == false {
				break
			}
		}
		iter0.Release()
		iter1.Release()
	}
	v.release()
}
+
+func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
+	const (
+		vSize = 200 * opt.KiB
+		tSize = 100 * opt.MiB
+		mIter = 100
+		n     = tSize / vSize
+	)
+
+	h := newDbHarnessWopt(t, &opt.Options{
+		Compression:       opt.NoCompression,
+		DisableBlockCache: true,
+	})
+	defer h.close()
+
+	key := func(x int) string {
+		return fmt.Sprintf("v%06d", x)
+	}
+
+	// Fill.
+	value := strings.Repeat("x", vSize)
+	for i := 0; i < n; i++ {
+		h.put(key(i), value)
+	}
+	h.compactMem()
+
+	// Delete all.
+	for i := 0; i < n; i++ {
+		h.delete(key(i))
+	}
+	h.compactMem()
+
+	var (
+		limit = n / limitDiv
+
+		startKey = key(0)
+		limitKey = key(limit)
+		maxKey   = key(n)
+		slice    = &util.Range{Limit: []byte(limitKey)}
+
+		initialSize0 = h.sizeOf(startKey, limitKey)
+		initialSize1 = h.sizeOf(limitKey, maxKey)
+	)
+
+	t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
+
+	for r := 0; true; r++ {
+		if r >= mIter {
+			t.Fatal("taking too long to compact")
+		}
+
+		// Iterates.
+		iter := h.db.NewIterator(slice, h.ro)
+		for iter.Next() {
+		}
+		if err := iter.Error(); err != nil {
+			t.Fatalf("Iter err: %v", err)
+		}
+		iter.Release()
+
+		// Wait compaction.
+		h.waitCompaction()
+
+		// Check size.
+		size0 := h.sizeOf(startKey, limitKey)
+		size1 := h.sizeOf(limitKey, maxKey)
+		t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
+		if size0 < initialSize0/10 {
+			break
+		}
+	}
+
+	if initialSize1 > 0 {
+		h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
+	}
+}
+
// TestDB_IterTriggeredCompaction runs the iterator-triggered compaction
// scenario over the whole key space (limitDiv = 1).
func TestDB_IterTriggeredCompaction(t *testing.T) {
	testDB_IterTriggeredCompaction(t, 1)
}
+
// TestDB_IterTriggeredCompactionHalf runs the iterator-triggered compaction
// scenario over the first half of the key space (limitDiv = 2).
func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
	testDB_IterTriggeredCompaction(t, 2)
}
+
+func TestDB_ReadOnly(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.put("foo", "v1")
+	h.put("bar", "v2")
+	h.compactMem()
+
+	h.put("xfoo", "v1")
+	h.put("xbar", "v2")
+
+	t.Log("Trigger read-only")
+	if err := h.db.SetReadOnly(); err != nil {
+		h.close()
+		t.Fatalf("SetReadOnly error: %v", err)
+	}
+
+	h.stor.SetEmuErr(storage.TypeAll, tsOpCreate, tsOpReplace, tsOpRemove, tsOpWrite, tsOpWrite, tsOpSync)
+
+	ro := func(key, value, wantValue string) {
+		if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		h.getVal(key, wantValue)
+	}
+
+	ro("foo", "vx", "v1")
+
+	h.o.ReadOnly = true
+	h.reopenDB()
+
+	ro("foo", "vx", "v1")
+	ro("bar", "vx", "v2")
+	h.assertNumKeys(4)
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
new file mode 100644
index 0000000..9ae3e45
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
// Reader is the interface that wraps basic Get and NewIterator methods.
// This interface is implemented by both DB and Snapshot.
type Reader interface {
	Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
	NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
}
+
// Sizes is a list of size values, one per measured range.
type Sizes []uint64

// Sum returns the total of all sizes in the list.
func (p Sizes) Sum() uint64 {
	var total uint64
	for _, size := range p {
		total += size
	}
	return total
}
+
// Logging helpers; both delegate to the session logger.
func (db *DB) log(v ...interface{})                 { db.s.log(v...) }
func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
+
// checkAndCleanFiles verifies that every table referenced by the current
// version exists in storage and removes files that are no longer needed
// (old manifests and journals, unreferenced tables). A referenced but
// missing table is reported as a corruption error.
func (db *DB) checkAndCleanFiles() error {
	v := db.s.version()
	defer v.release()

	// Referenced table file numbers; the value tracks whether the file was
	// actually seen in storage.
	tablesMap := make(map[uint64]bool)
	for _, tables := range v.tables {
		for _, t := range tables {
			tablesMap[t.file.Num()] = false
		}
	}

	files, err := db.s.getFiles(storage.TypeAll)
	if err != nil {
		return err
	}

	var nTables int
	var rem []storage.File
	for _, f := range files {
		keep := true
		switch f.Type() {
		case storage.TypeManifest:
			// Keep the current manifest and anything newer.
			keep = f.Num() >= db.s.manifestFile.Num()
		case storage.TypeJournal:
			// Keep from the frozen journal (if one exists) onward,
			// otherwise from the live journal onward.
			if db.frozenJournalFile != nil {
				keep = f.Num() >= db.frozenJournalFile.Num()
			} else {
				keep = f.Num() >= db.journalFile.Num()
			}
		case storage.TypeTable:
			// Keep only tables referenced by the current version.
			_, keep = tablesMap[f.Num()]
			if keep {
				tablesMap[f.Num()] = true
				nTables++
			}
		}

		if !keep {
			rem = append(rem, f)
		}
	}

	// Seeing fewer tables than are referenced means at least one is missing.
	if nTables != len(tablesMap) {
		var missing []*storage.FileInfo
		for num, present := range tablesMap {
			if !present {
				missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num})
				db.logf("db@janitor table missing @%d", num)
			}
		}
		return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing})
	}

	db.logf("db@janitor F·%d G·%d", len(files), len(rem))
	for _, f := range rem {
		db.logf("db@janitor removing %s-%d", f.Type(), f.Num())
		if err := f.Remove(); err != nil {
			return err
		}
	}
	return nil
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
new file mode 100644
index 0000000..9634852
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -0,0 +1,338 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"time"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func (db *DB) writeJournal(b *Batch) error {
+	w, err := db.journal.Next()
+	if err != nil {
+		return err
+	}
+	if _, err := w.Write(b.encode()); err != nil {
+		return err
+	}
+	if err := db.journal.Flush(); err != nil {
+		return err
+	}
+	if b.sync {
+		return db.journalWriter.Sync()
+	}
+	return nil
+}
+
+func (db *DB) jWriter() {
+	defer db.closeW.Done()
+	for {
+		select {
+		case b := <-db.journalC:
+			if b != nil {
+				db.journalAckC <- db.writeJournal(b)
+			}
+		case _, _ = <-db.closeC:
+			return
+		}
+	}
+}
+
+func (db *DB) rotateMem(n int) (mem *memDB, err error) {
+	// Wait for pending memdb compaction.
+	err = db.compSendIdle(db.mcompCmdC)
+	if err != nil {
+		return
+	}
+
+	// Create new memdb and journal.
+	mem, err = db.newMem(n)
+	if err != nil {
+		return
+	}
+
+	// Schedule memdb compaction.
+	db.compSendTrigger(db.mcompCmdC)
+	return
+}
+
// flush makes room for an n-byte write, throttling the writer and/or
// rotating the memdb as needed. It returns the effective memdb (the caller
// holds a reference and must decref it) and the free space remaining in it.
func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
	delayed := false
	flush := func() (retry bool) {
		v := db.s.version()
		defer v.release()
		mdb = db.getEffectiveMem()
		defer func() {
			// On retry the memdb reference is dropped; a fresh one is
			// acquired on the next attempt.
			if retry {
				mdb.decref()
				mdb = nil
			}
		}()
		mdbFree = mdb.Free()
		switch {
		case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
			// Level-0 is filling up: slow this writer down once.
			delayed = true
			time.Sleep(time.Millisecond)
		case mdbFree >= n:
			// Enough room in the current memdb.
			return false
		case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
			// Level-0 is full: block until table compaction catches up.
			delayed = true
			err = db.compSendIdle(db.tcompCmdC)
			if err != nil {
				return false
			}
		default:
			// Allow memdb to grow if it has no entry.
			if mdb.Len() == 0 {
				mdbFree = n
			} else {
				// Memdb is full: rotate to a fresh one.
				mdb.decref()
				mdb, err = db.rotateMem(n)
				if err == nil {
					mdbFree = mdb.Free()
				} else {
					mdbFree = 0
				}
			}
			return false
		}
		return true
	}
	start := time.Now()
	for flush() {
	}
	// Accumulate write-delay stats; once a write goes through undelayed,
	// log and reset the counters.
	if delayed {
		db.writeDelay += time.Since(start)
		db.writeDelayN++
	} else if db.writeDelayN > 0 {
		db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
		db.writeDelay = 0
		db.writeDelayN = 0
	}
	return
}
+
// Write applies the given batch to the DB. The batch will be applied
// sequentially; a concurrent writer may have its batch merged into the
// batch of the writer currently holding the write lock.
//
// It is safe to modify the contents of the arguments after Write returns.
func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
	err = db.ok()
	if err != nil || b == nil || b.Len() == 0 {
		return
	}

	b.init(wo.GetSync() && !db.s.o.GetNoSync())

	// The write happen synchronously: either hand the batch to the current
	// lock holder for merging, or become the lock holder ourselves.
	select {
	case db.writeC <- b:
		// Another writer took the batch. If it merged it, its ack carries
		// our result; a false reply means the write lock has been handed
		// over to us, so fall through as the new lock holder.
		if <-db.writeMergedC {
			return <-db.writeAckC
		}
	case db.writeLockC <- struct{}{}:
		// Acquired the write lock.
	case err = <-db.compPerErrC:
		return
	case _, _ = <-db.closeC:
		return ErrClosed
	}

	merged := 0
	danglingMerge := false
	defer func() {
		// Either hand the lock to the writer whose batch we declined to
		// merge, or release it; then acknowledge every merged writer.
		if danglingMerge {
			db.writeMergedC <- false
		} else {
			<-db.writeLockC
		}
		for i := 0; i < merged; i++ {
			db.writeAckC <- err
		}
	}()

	mdb, mdbFree, err := db.flush(b.size())
	if err != nil {
		return
	}
	defer mdb.decref()

	// Calculate maximum size of the batch.
	m := 1 << 20
	if x := b.size(); x <= 128<<10 {
		m = x + (128 << 10)
	}
	m = minInt(m, mdbFree)

	// Merge with other batch.
drain:
	for b.size() < m && !b.sync {
		select {
		case nb := <-db.writeC:
			if b.size()+nb.size() <= m {
				// Fold the incoming batch into ours and promise an ack.
				b.append(nb)
				db.writeMergedC <- true
				merged++
			} else {
				// Too big to merge; the deferred handler passes the lock
				// to this writer.
				danglingMerge = true
				break drain
			}
		default:
			break drain
		}
	}

	// Set batch first seq number relative from last seq.
	b.seq = db.seq + 1

	// Write journal concurrently if it is large enough.
	if b.size() >= (128 << 10) {
		// Push the write batch to the journal writer
		select {
		case db.journalC <- b:
			// Write into memdb
			if berr := b.memReplay(mdb.DB); berr != nil {
				panic(berr)
			}
		case err = <-db.compPerErrC:
			return
		case _, _ = <-db.closeC:
			err = ErrClosed
			return
		}
		// Wait for journal writer
		select {
		case err = <-db.journalAckC:
			if err != nil {
				// Revert memdb if error detected
				if berr := b.revertMemReplay(mdb.DB); berr != nil {
					panic(berr)
				}
				return
			}
		case _, _ = <-db.closeC:
			err = ErrClosed
			return
		}
	} else {
		// Small batch: write the journal inline, then apply to the memdb.
		err = db.writeJournal(b)
		if err != nil {
			return
		}
		if berr := b.memReplay(mdb.DB); berr != nil {
			panic(berr)
		}
	}

	// Set last seq number.
	db.addSeq(uint64(b.Len()))

	if b.size() >= mdbFree {
		// Memdb is full; rotate it so compaction can pick it up.
		// NOTE(review): rotateMem's error is silently discarded here —
		// confirm whether it should be propagated.
		db.rotateMem(0)
	}
	return
}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
+	b := new(Batch)
+	b.Put(key, value)
+	return db.Write(b, wo)
+}
+
+// Delete deletes the value for the given key.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
+	b := new(Batch)
+	b.Delete(key)
+	return db.Write(b, wo)
+}
+
+func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
+	iter := mem.NewIterator(nil)
+	defer iter.Release()
+	return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
+		(min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
+}
+
+// CompactRange compacts the underlying DB for the given key range.
+// In particular, deleted and overwritten versions are discarded,
+// and the data is rearranged to reduce the cost of operations
+// needed to access the data. This operation should typically only
+// be invoked by users who understand the underlying implementation.
+//
+// A nil Range.Start is treated as a key before all keys in the DB.
+// And a nil Range.Limit is treated as a key after all keys in the DB.
+// Therefore if both is nil then it will compact entire DB.
+func (db *DB) CompactRange(r util.Range) error {
+	if err := db.ok(); err != nil {
+		return err
+	}
+
+	// Lock writer.
+	select {
+	case db.writeLockC <- struct{}{}:
+	case err := <-db.compPerErrC:
+		return err
+	case _, _ = <-db.closeC:
+		return ErrClosed
+	}
+
+	// Check for overlaps in memdb.
+	mdb := db.getEffectiveMem()
+	defer mdb.decref()
+	if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
+		// Memdb compaction.
+		if _, err := db.rotateMem(0); err != nil {
+			<-db.writeLockC
+			return err
+		}
+		<-db.writeLockC
+		if err := db.compSendIdle(db.mcompCmdC); err != nil {
+			return err
+		}
+	} else {
+		<-db.writeLockC
+	}
+
+	// Table compaction.
+	return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
+}
+
+// SetReadOnly makes DB read-only. It will stay read-only until reopened.
+func (db *DB) SetReadOnly() error {
+	if err := db.ok(); err != nil {
+		return err
+	}
+
+	// Lock writer.
+	select {
+	case db.writeLockC <- struct{}{}:
+		db.compWriteLocking = true
+	case err := <-db.compPerErrC:
+		return err
+	case _, _ = <-db.closeC:
+		return ErrClosed
+	}
+
+	// Set compaction read-only.
+	select {
+	case db.compErrSetC <- ErrReadOnly:
+	case perr := <-db.compPerErrC:
+		return perr
+	case _, _ = <-db.closeC:
+		return ErrClosed
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
new file mode 100644
index 0000000..53f13bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package leveldb provides implementation of LevelDB key/value database.
+//
+// Create or open a database:
+//
+//	db, err := leveldb.OpenFile("path/to/db", nil)
+//	...
+//	defer db.Close()
+//	...
+//
+// Read or modify the database content:
+//
+//	// Remember that the contents of the returned slice should not be modified.
+//	data, err := db.Get([]byte("key"), nil)
+//	...
+//	err = db.Put([]byte("key"), []byte("value"), nil)
+//	...
+//	err = db.Delete([]byte("key"), nil)
+//	...
+//
+// Iterate over database content:
+//
+//	iter := db.NewIterator(nil, nil)
+//	for iter.Next() {
+//		// Remember that the contents of the returned slice should not be modified, and
+//		// only valid until the next call to Next.
+//		key := iter.Key()
+//		value := iter.Value()
+//		...
+//	}
+//	iter.Release()
+//	err = iter.Error()
+//	...
+//
+// Iterate over subset of database content with a particular prefix:
+//	iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+//	for iter.Next() {
+//		// Use key/value.
+//		...
+//	}
+//	iter.Release()
+//	err = iter.Error()
+//	...
+//
+// Seek-then-Iterate:
+//
+// 	iter := db.NewIterator(nil, nil)
+// 	for ok := iter.Seek(key); ok; ok = iter.Next() {
+// 		// Use key/value.
+// 		...
+// 	}
+// 	iter.Release()
+// 	err = iter.Error()
+// 	...
+//
+// Iterate over subset of database content:
+//
+// 	iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
+// 	for iter.Next() {
+// 		// Use key/value.
+// 		...
+// 	}
+// 	iter.Release()
+// 	err = iter.Error()
+// 	...
+//
+// Batch writes:
+//
+//	batch := new(leveldb.Batch)
+//	batch.Put([]byte("foo"), []byte("value"))
+//	batch.Put([]byte("bar"), []byte("another value"))
+//	batch.Delete([]byte("baz"))
+//	err = db.Write(batch, nil)
+//	...
+//
+// Use bloom filter:
+//
+//	o := &opt.Options{
+//		Filter: filter.NewBloomFilter(10),
+//	}
+//	db, err := leveldb.OpenFile("path/to/db", o)
+//	...
+//	defer db.Close()
+//	...
+package leveldb
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
new file mode 100644
index 0000000..7807c12
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
@@ -0,0 +1,19 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+)
+
var (
	// ErrNotFound is returned when the requested key is not found; it is
	// shared with the errors subpackage so both report the same value.
	ErrNotFound         = errors.ErrNotFound
	// ErrReadOnly is returned by write operations while the DB is in
	// read-only mode.
	ErrReadOnly         = errors.New("leveldb: read-only mode")
	// ErrSnapshotReleased is returned when using an already-released snapshot.
	ErrSnapshotReleased = errors.New("leveldb: snapshot released")
	// ErrIterReleased is returned when using an already-released iterator.
	ErrIterReleased     = errors.New("leveldb: iterator released")
	// ErrClosed is returned when operating on a closed DB.
	ErrClosed           = errors.New("leveldb: closed")
)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go
new file mode 100644
index 0000000..3a413c2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package errors provides common error types used throughout leveldb.
+package errors
+
+import (
+	"errors"
+	"fmt"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
var (
	// ErrNotFound means that the requested entry was not found.
	ErrNotFound    = New("leveldb: not found")
	// ErrReleased and ErrHasReleaser are re-exported from leveldb/util.
	ErrReleased    = util.ErrReleased
	ErrHasReleaser = util.ErrHasReleaser
)
+
// New returns an error whose message is the given text.
func New(msg string) error {
	return errors.New(msg)
}
+
+// ErrCorrupted is the type that wraps errors that indicate corruption in
+// the database.
+type ErrCorrupted struct {
+	File *storage.FileInfo
+	Err  error
+}
+
+func (e *ErrCorrupted) Error() string {
+	if e.File != nil {
+		return fmt.Sprintf("%v [file=%v]", e.Err, e.File)
+	} else {
+		return e.Err.Error()
+	}
+}
+
// NewErrCorrupted creates new ErrCorrupted error, attaching the file info
// derived from f. The file may be nil when the corruption is not tied to a
// specific file.
func NewErrCorrupted(f storage.File, err error) error {
	return &ErrCorrupted{storage.NewFileInfo(f), err}
}
+
+// IsCorrupted returns a boolean indicating whether the error is indicating
+// a corruption.
+func IsCorrupted(err error) bool {
+	switch err.(type) {
+	case *ErrCorrupted:
+		return true
+	case *storage.ErrCorrupted:
+		return true
+	}
+	return false
+}
+
// ErrMissingFiles indicates a corruption caused by missing files.
// ErrMissingFiles is always wrapped in an ErrCorrupted.
type ErrMissingFiles struct {
	Files []*storage.FileInfo
}

// Error implements the error interface.
func (e *ErrMissingFiles) Error() string { return "file missing" }
+
+// SetFile sets 'file info' of the given error with the given file.
+// Currently only ErrCorrupted is supported, otherwise will do nothing.
+func SetFile(err error, f storage.File) error {
+	switch x := err.(type) {
+	case *ErrCorrupted:
+		x.File = storage.NewFileInfo(f)
+		return x
+	}
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
new file mode 100644
index 0000000..6a53ba1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
// Ginkgo-based external test suite: exercises the public DB interface
// through the shared testutil harness.
var _ = testutil.Defer(func() {
	Describe("Leveldb external", func() {
		// Deliberately tiny buffer/block/table sizes with strict checks
		// enabled, so the suite covers many code paths with little data.
		o := &opt.Options{
			DisableBlockCache:      true,
			BlockRestartInterval:   5,
			BlockSize:              80,
			Compression:            opt.NoCompression,
			OpenFilesCacheCapacity: -1,
			Strict:                 opt.StrictAll,
			WriteBuffer:            1000,
			CompactionTableSize:    2000,
		}

		Describe("write test", func() {
			It("should do write correctly", func(done Done) {
				db := newTestingDB(o, nil, nil)
				t := testutil.DBTesting{
					DB:      db,
					Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(),
				}
				testutil.DoDBTesting(&t)
				db.TestClose()
				done <- true
			}, 20.0)
		})

		Describe("read test", func() {
			testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB {
				// Building the DB.
				db := newTestingDB(o, nil, nil)
				kv.IterateShuffled(nil, func(i int, key, value []byte) {
					err := db.TestPut(key, value)
					Expect(err).NotTo(HaveOccurred())
				})

				return db
			}, func(db testutil.DB) {
				db.(*testingDB).TestClose()
			})
		})
	})
})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go
new file mode 100644
index 0000000..992001e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
+)
+
// iFilter adapts a user-supplied filter.Filter to internal keys: the
// internal-key suffix is stripped via ukey() so the wrapped filter only
// ever sees user keys.
type iFilter struct {
	filter.Filter
}

// Contains tests the user-key portion of the internal key against the filter.
func (f iFilter) Contains(filter, key []byte) bool {
	return f.Filter.Contains(filter, iKey(key).ukey())
}

// NewGenerator wraps the underlying generator so that added keys are
// stripped the same way.
func (f iFilter) NewGenerator() filter.FilterGenerator {
	return iFilterGenerator{f.Filter.NewGenerator()}
}

// iFilterGenerator strips the internal-key suffix before handing keys to
// the wrapped generator.
type iFilterGenerator struct {
	filter.FilterGenerator
}

// Add adds the user-key portion of key to the wrapped generator.
func (g iFilterGenerator) Add(key []byte) {
	g.FilterGenerator.Add(iKey(key).ukey())
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
new file mode 100644
index 0000000..7f50a92
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package filter
+
+import (
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
// bloomHash hashes key with the fixed seed 0xbc9f1d34; this is the base
// hash from which all probe positions are derived.
func bloomHash(key []byte) uint32 {
	return util.Hash(key, 0xbc9f1d34)
}
+
// bloomFilter holds the bits-per-key setting of a bloom filter policy.
type bloomFilter int

// Name returns the canonical policy name.
//
// The bloom filter serializes its parameters and is backward compatible
// with respect to them. Therefore, its parameters are not added to its
// name.
func (bloomFilter) Name() string {
	return "leveldb.BuiltinBloomFilter"
}
+
// Contains reports whether key may be in the set encoded by filter. The
// last filter byte holds k (the probe count); the remaining bytes are the
// bit array. Bloom filters may report false positives, never false
// negatives.
func (f bloomFilter) Contains(filter, key []byte) bool {
	nBytes := len(filter) - 1
	if nBytes < 1 {
		// Too short to hold the probe-count byte plus any data.
		return false
	}
	nBits := uint32(nBytes * 8)

	// Use the encoded k so that we can read filters generated by
	// bloom filters created using different parameters.
	k := filter[nBytes]
	if k > 30 {
		// Reserved for potentially new encodings for short bloom filters.
		// Consider it a match.
		return true
	}

	kh := bloomHash(key)
	delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
	// Double hashing: derive k probe positions from a single base hash.
	for j := uint8(0); j < k; j++ {
		bitpos := kh % nBits
		if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 {
			return false
		}
		kh += delta
	}
	return true
}
+
+func (f bloomFilter) NewGenerator() FilterGenerator {
+	// Round down to reduce probing cost a little bit.
+	k := uint8(f * 69 / 100) // 0.69 =~ ln(2)
+	if k < 1 {
+		k = 1
+	} else if k > 30 {
+		k = 30
+	}
+	return &bloomFilterGenerator{
+		n: int(f),
+		k: k,
+	}
+}
+
// bloomFilterGenerator accumulates key hashes and emits a bloom filter bit
// array on Generate.
type bloomFilterGenerator struct {
	n int   // bits per key
	k uint8 // number of probes per key

	keyHashes []uint32
}

// Add records the hash of key for inclusion in the next Generate call.
func (g *bloomFilterGenerator) Add(key []byte) {
	// Use double-hashing to generate a sequence of hash values.
	// See analysis in [Kirsch,Mitzenmacher 2006].
	g.keyHashes = append(g.keyHashes, bloomHash(key))
}
+
// Generate writes the filter for all keys added so far into b and resets
// the key buffer. Layout: the bit array followed by one byte holding k.
func (g *bloomFilterGenerator) Generate(b Buffer) {
	// Compute bloom filter size (in both bits and bytes)
	nBits := uint32(len(g.keyHashes) * g.n)
	// For small n, we can see a very high false positive rate.  Fix it
	// by enforcing a minimum bloom filter length.
	if nBits < 64 {
		nBits = 64
	}
	nBytes := (nBits + 7) / 8
	nBits = nBytes * 8

	dest := b.Alloc(int(nBytes) + 1)
	dest[nBytes] = g.k
	for _, kh := range g.keyHashes {
		delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
		for j := uint8(0); j < g.k; j++ {
			bitpos := kh % nBits
			dest[bitpos/8] |= (1 << (bitpos % 8))
			kh += delta
		}
	}

	// Ready for the next table: drop the accumulated hashes but keep the
	// backing array for reuse.
	g.keyHashes = g.keyHashes[:0]
}
+
// NewBloomFilter creates a new initialized bloom filter for given
// bitsPerKey.
//
// Since bitsPerKey is persisted individually for each bloom filter
// serialization, bloom filters are backwards compatible with respect to
// changing bitsPerKey. This means that no big performance penalty will
// be experienced when changing the parameter. See documentation for
// opt.Options.Filter for more information.
//
// The returned value implements the Filter interface.
func NewBloomFilter(bitsPerKey int) Filter {
	return bloomFilter(bitsPerKey)
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go
new file mode 100644
index 0000000..122a246
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package filter
+
+import (
+	"encoding/binary"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+	"testing"
+)
+
// harness bundles a bloom filter policy, its generator, and the most
// recently built filter bytes for the tests below.
type harness struct {
	t *testing.T

	bloom     Filter
	generator FilterGenerator
	filter    []byte
}

// newHarness returns a harness around a 10-bits-per-key bloom filter.
func newHarness(t *testing.T) *harness {
	bloom := NewBloomFilter(10)
	return &harness{
		t:         t,
		bloom:     bloom,
		generator: bloom.NewGenerator(),
	}
}

// add feeds a key into the generator.
func (h *harness) add(key []byte) {
	h.generator.Add(key)
}

// addNum feeds the little-endian encoding of key into the generator.
func (h *harness) addNum(key uint32) {
	var b [4]byte
	binary.LittleEndian.PutUint32(b[:], key)
	h.add(b[:])
}

// build materializes the filter from all keys added so far.
func (h *harness) build() {
	b := &util.Buffer{}
	h.generator.Generate(b)
	h.filter = b.Bytes()
}

// reset discards the built filter (the generator's key buffer was already
// cleared by Generate).
func (h *harness) reset() {
	h.filter = nil
}

// filterLen returns the size of the built filter in bytes.
func (h *harness) filterLen() int {
	return len(h.filter)
}

// assert checks Contains(filter, key) against want; when silent is true it
// only returns the result without reporting a test failure.
func (h *harness) assert(key []byte, want, silent bool) bool {
	got := h.bloom.Contains(h.filter, key)
	if !silent && got != want {
		h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want)
	}
	return got
}

// assertNum is assert for the little-endian encoding of key.
func (h *harness) assertNum(key uint32, want, silent bool) bool {
	var b [4]byte
	binary.LittleEndian.PutUint32(b[:], key)
	return h.assert(b[:], want, silent)
}
+
// TestBloomFilter_Empty checks that a filter built from no keys reports
// nothing as present.
func TestBloomFilter_Empty(t *testing.T) {
	h := newHarness(t)
	h.build()
	h.assert([]byte("hello"), false, false)
	h.assert([]byte("world"), false, false)
}
+
// TestBloomFilter_Small checks basic membership on a two-key filter: added
// keys are reported present, unrelated keys absent.
func TestBloomFilter_Small(t *testing.T) {
	h := newHarness(t)
	h.add([]byte("hello"))
	h.add([]byte("world"))
	h.build()
	h.assert([]byte("hello"), true, false)
	h.assert([]byte("world"), true, false)
	h.assert([]byte("x"), false, false)
	h.assert([]byte("foo"), false, false)
}
+
// nextN returns the next key-set size to test: step 1 below 10, step 10
// below 100, step 100 below 1000, and step 1000 from there on.
func nextN(n int) int {
	switch {
	case n < 10:
		return n + 1
	case n < 100:
		return n + 10
	case n < 1000:
		return n + 100
	}
	return n + 1000
}
+
// TestBloomFilter_VaryingLengths builds filters over growing key sets and
// checks the filter size bound, the absence of false negatives, and the
// false-positive rate.
func TestBloomFilter_VaryingLengths(t *testing.T) {
	h := newHarness(t)
	var mediocre, good int
	for n := 1; n < 10000; n = nextN(n) {
		h.reset()
		for i := 0; i < n; i++ {
			h.addNum(uint32(i))
		}
		h.build()

		// Size must stay close to n * bitsPerKey / 8, plus slack.
		got := h.filterLen()
		want := (n * 10 / 8) + 40
		if got > want {
			t.Errorf("filter len test failed, '%d' > '%d'", got, want)
		}

		// Every added key must be reported present (no false negatives).
		for i := 0; i < n; i++ {
			h.assertNum(uint32(i), true, false)
		}

		// Estimate the false-positive rate over 10000 keys that were
		// never added.
		var rate float32
		for i := 0; i < 10000; i++ {
			if h.assertNum(uint32(i+1000000000), true, true) {
				rate++
			}
		}
		rate /= 10000
		if rate > 0.02 {
			t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n)
		}
		if rate > 0.0125 {
			mediocre++
		} else {
			good++
		}
	}
	t.Logf("false positive rate: %d good, %d mediocre", good, mediocre)
	if mediocre > good/5 {
		t.Error("mediocre false positive rate is more than expected")
	}
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go
new file mode 100644
index 0000000..7a925c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package filter provides interface and implementation of probabilistic
+// data structure.
+//
+// The filter is responsible for creating small filters from a set of keys.
+// These filters will then be used to test whether a key is a member of the set.
+// In many cases, a filter can cut down the number of disk seeks from a
+// handful to a single disk seek per DB.Get call.
+package filter
+
+// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods.
+type Buffer interface {
+	// Alloc allocates an n-byte slice from the buffer. This also advances
+	// the write offset.
+	Alloc(n int) []byte
+
+	// Write appends the contents of p to the buffer.
+	Write(p []byte) (n int, err error)
+
+	// WriteByte appends the byte c to the buffer.
+	WriteByte(c byte) error
+}
+
+// Filter is the filter.
+type Filter interface {
+	// Name returns the name of this policy.
+	//
+	// Note that if the filter encoding changes in an incompatible way,
+	// the name returned by this method must be changed. Otherwise, old
+	// incompatible filters may be passed to methods of this type.
+	Name() string
+
+	// NewGenerator creates a new filter generator.
+	NewGenerator() FilterGenerator
+
+	// Contains returns true if the filter contains the given key.
+	//
+	// The filter argument is a filter generated by the filter generator.
+	Contains(filter, key []byte) bool
+}
+
+// FilterGenerator is the filter generator.
+type FilterGenerator interface {
+	// Add adds a key to the filter generator.
+	//
+	// The key may become invalid after the call to this method ends, therefore
+	// the key must be copied if the implementation requires keeping it for
+	// later use. The key should not be modified directly, doing so may cause
+	// undefined results.
+	Add(key []byte)
+
+	// Generate generates filters based on keys passed so far. After a call
+	// to Generate the filter generator may be reset, depending on the implementation.
+	Generate(b Buffer)
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
new file mode 100644
index 0000000..53ff7c8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
@@ -0,0 +1,184 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// BasicArray is the interface that wraps basic Len and Search method.
+type BasicArray interface {
+	// Len returns length of the array.
+	Len() int
+
+	// Search finds smallest index that point to a key that is greater
+	// than or equal to the given key.
+	Search(key []byte) int
+}
+
+// Array is the interface that wraps BasicArray and basic Index method.
+type Array interface {
+	BasicArray
+
+	// Index returns key/value pair with index of i.
+	Index(i int) (key, value []byte)
+}
+
+// ArrayIndexer is the interface that wraps BasicArray and the basic Get method.
+type ArrayIndexer interface {
+	BasicArray
+
+	// Get returns a new data iterator with index of i.
+	Get(i int) Iterator
+}
+
+type basicArrayIterator struct {
+	util.BasicReleaser
+	array BasicArray
+	pos   int
+	err   error
+}
+
+func (i *basicArrayIterator) Valid() bool {
+	return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
+}
+
+func (i *basicArrayIterator) First() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.array.Len() == 0 {
+		i.pos = -1
+		return false
+	}
+	i.pos = 0
+	return true
+}
+
+func (i *basicArrayIterator) Last() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	n := i.array.Len()
+	if n == 0 {
+		i.pos = 0
+		return false
+	}
+	i.pos = n - 1
+	return true
+}
+
+func (i *basicArrayIterator) Seek(key []byte) bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	n := i.array.Len()
+	if n == 0 {
+		i.pos = 0
+		return false
+	}
+	i.pos = i.array.Search(key)
+	if i.pos >= n {
+		return false
+	}
+	return true
+}
+
+func (i *basicArrayIterator) Next() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	i.pos++
+	if n := i.array.Len(); i.pos >= n {
+		i.pos = n
+		return false
+	}
+	return true
+}
+
+func (i *basicArrayIterator) Prev() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	i.pos--
+	if i.pos < 0 {
+		i.pos = -1
+		return false
+	}
+	return true
+}
+
+func (i *basicArrayIterator) Error() error { return i.err }
+
+type arrayIterator struct {
+	basicArrayIterator
+	array      Array
+	pos        int
+	key, value []byte
+}
+
+// updateKV refreshes the cached key/value pair when the underlying
+// basic iterator position has moved since the last call; the cache is
+// cleared when the position is no longer valid.
+func (i *arrayIterator) updateKV() {
+	if i.pos == i.basicArrayIterator.pos {
+		return
+	}
+	i.pos = i.basicArrayIterator.pos
+	if i.Valid() {
+		i.key, i.value = i.array.Index(i.pos)
+	} else {
+		i.key = nil
+		i.value = nil
+	}
+}
+
+func (i *arrayIterator) Key() []byte {
+	i.updateKV()
+	return i.key
+}
+
+func (i *arrayIterator) Value() []byte {
+	i.updateKV()
+	return i.value
+}
+
+type arrayIteratorIndexer struct {
+	basicArrayIterator
+	array ArrayIndexer
+}
+
+func (i *arrayIteratorIndexer) Get() Iterator {
+	if i.Valid() {
+		return i.array.Get(i.basicArrayIterator.pos)
+	}
+	return nil
+}
+
+// NewArrayIterator returns an iterator from the given array.
+func NewArrayIterator(array Array) Iterator {
+	return &arrayIterator{
+		basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+		array:              array,
+		pos:                -1,
+	}
+}
+
+// NewArrayIndexer returns an index iterator from the given array.
+func NewArrayIndexer(array ArrayIndexer) IteratorIndexer {
+	return &arrayIteratorIndexer{
+		basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+		array:              array,
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go
new file mode 100644
index 0000000..ef26200
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator_test
+
+import (
+	. "github.com/onsi/ginkgo"
+
+	. "git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+var _ = testutil.Defer(func() {
+	Describe("Array iterator", func() {
+		It("Should iterates and seeks correctly", func() {
+			// Build key/value.
+			kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3)
+
+			// Test the iterator.
+			t := testutil.IteratorTesting{
+				KeyValue: kv.Clone(),
+				Iter:     NewArrayIterator(kv),
+			}
+			testutil.DoIteratorTesting(&t)
+		})
+	})
+})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
new file mode 100644
index 0000000..147d18a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// IteratorIndexer is the interface that wraps CommonIterator and basic Get
+// method. IteratorIndexer provides index for indexed iterator.
+type IteratorIndexer interface {
+	CommonIterator
+
+	// Get returns a new data iterator for the current position, or nil if
+	// done.
+	Get() Iterator
+}
+
+type indexedIterator struct {
+	util.BasicReleaser
+	index  IteratorIndexer
+	strict bool
+
+	data   Iterator
+	err    error
+	errf   func(err error)
+	closed bool
+}
+
+func (i *indexedIterator) setData() {
+	if i.data != nil {
+		i.data.Release()
+	}
+	i.data = i.index.Get()
+}
+
+func (i *indexedIterator) clearData() {
+	if i.data != nil {
+		i.data.Release()
+	}
+	i.data = nil
+}
+
+func (i *indexedIterator) indexErr() {
+	if err := i.index.Error(); err != nil {
+		if i.errf != nil {
+			i.errf(err)
+		}
+		i.err = err
+	}
+}
+
+func (i *indexedIterator) dataErr() bool {
+	if err := i.data.Error(); err != nil {
+		if i.errf != nil {
+			i.errf(err)
+		}
+		if i.strict || !errors.IsCorrupted(err) {
+			i.err = err
+			return true
+		}
+	}
+	return false
+}
+
+func (i *indexedIterator) Valid() bool {
+	return i.data != nil && i.data.Valid()
+}
+
+func (i *indexedIterator) First() bool {
+	if i.err != nil {
+		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if !i.index.First() {
+		i.indexErr()
+		i.clearData()
+		return false
+	}
+	i.setData()
+	return i.Next()
+}
+
+func (i *indexedIterator) Last() bool {
+	if i.err != nil {
+		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if !i.index.Last() {
+		i.indexErr()
+		i.clearData()
+		return false
+	}
+	i.setData()
+	if !i.data.Last() {
+		if i.dataErr() {
+			return false
+		}
+		i.clearData()
+		return i.Prev()
+	}
+	return true
+}
+
+func (i *indexedIterator) Seek(key []byte) bool {
+	if i.err != nil {
+		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if !i.index.Seek(key) {
+		i.indexErr()
+		i.clearData()
+		return false
+	}
+	i.setData()
+	if !i.data.Seek(key) {
+		if i.dataErr() {
+			return false
+		}
+		i.clearData()
+		return i.Next()
+	}
+	return true
+}
+
+func (i *indexedIterator) Next() bool {
+	if i.err != nil {
+		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	switch {
+	case i.data != nil && !i.data.Next():
+		if i.dataErr() {
+			return false
+		}
+		i.clearData()
+		fallthrough
+	case i.data == nil:
+		if !i.index.Next() {
+			i.indexErr()
+			return false
+		}
+		i.setData()
+		return i.Next()
+	}
+	return true
+}
+
+func (i *indexedIterator) Prev() bool {
+	if i.err != nil {
+		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	switch {
+	case i.data != nil && !i.data.Prev():
+		if i.dataErr() {
+			return false
+		}
+		i.clearData()
+		fallthrough
+	case i.data == nil:
+		if !i.index.Prev() {
+			i.indexErr()
+			return false
+		}
+		i.setData()
+		if !i.data.Last() {
+			if i.dataErr() {
+				return false
+			}
+			i.clearData()
+			return i.Prev()
+		}
+	}
+	return true
+}
+
+func (i *indexedIterator) Key() []byte {
+	if i.data == nil {
+		return nil
+	}
+	return i.data.Key()
+}
+
+func (i *indexedIterator) Value() []byte {
+	if i.data == nil {
+		return nil
+	}
+	return i.data.Value()
+}
+
+func (i *indexedIterator) Release() {
+	i.clearData()
+	i.index.Release()
+	i.BasicReleaser.Release()
+}
+
+func (i *indexedIterator) Error() error {
+	if i.err != nil {
+		return i.err
+	}
+	if err := i.index.Error(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (i *indexedIterator) SetErrorCallback(f func(err error)) {
+	i.errf = f
+}
+
+// NewIndexedIterator returns an 'indexed iterator'. An index is iterator
+// that returns another iterator, a 'data iterator'. A 'data iterator' is the
+// iterator that contains actual key/value pairs.
+//
+// If strict is true then any 'corruption errors' (i.e errors.IsCorrupted(err) == true)
+// won't be ignored and will halt 'indexed iterator', otherwise the iterator will
+// continue to the next 'data iterator'. Corruption on 'index iterator' will not be
+// ignored and will halt the iterator.
+func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
+	return &indexedIterator{index: index, strict: strict}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
new file mode 100644
index 0000000..105f22e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator_test
+
+import (
+	"sort"
+
+	. "github.com/onsi/ginkgo"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	. "git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+type keyValue struct {
+	key []byte
+	testutil.KeyValue
+}
+
+type keyValueIndex []keyValue
+
+func (x keyValueIndex) Search(key []byte) int {
+	return sort.Search(x.Len(), func(i int) bool {
+		return comparer.DefaultComparer.Compare(x[i].key, key) >= 0
+	})
+}
+
+func (x keyValueIndex) Len() int                        { return len(x) }
+func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil }
+func (x keyValueIndex) Get(i int) Iterator              { return NewArrayIterator(x[i]) }
+
+var _ = testutil.Defer(func() {
+	Describe("Indexed iterator", func() {
+		Test := func(n ...int) func() {
+			if len(n) == 0 {
+				rnd := testutil.NewRand()
+				n = make([]int, rnd.Intn(17)+3)
+				for i := range n {
+					n[i] = rnd.Intn(19) + 1
+				}
+			}
+
+			return func() {
+				It("Should iterates and seeks correctly", func(done Done) {
+					// Build key/value.
+					index := make(keyValueIndex, len(n))
+					sum := 0
+					for _, x := range n {
+						sum += x
+					}
+					kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4)
+					for i, j := 0, 0; i < len(n); i++ {
+						for x := n[i]; x > 0; x-- {
+							key, value := kv.Index(j)
+							index[i].key = key
+							index[i].Put(key, value)
+							j++
+						}
+					}
+
+					// Test the iterator.
+					t := testutil.IteratorTesting{
+						KeyValue: kv.Clone(),
+						Iter:     NewIndexedIterator(NewArrayIndexer(index), true),
+					}
+					testutil.DoIteratorTesting(&t)
+					done <- true
+				}, 1.5)
+			}
+		}
+
+		Describe("with 100 keys", Test(100))
+		Describe("with 50-50 keys", Test(50, 50))
+		Describe("with 50-1 keys", Test(50, 1))
+		Describe("with 50-1-50 keys", Test(50, 1, 50))
+		Describe("with 1-50 keys", Test(1, 50))
+		Describe("with random N-keys", Test())
+	})
+})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
new file mode 100644
index 0000000..86cf8c6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -0,0 +1,131 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package iterator provides interface and implementation to traverse over
+// contents of a database.
+package iterator
+
+import (
+	"errors"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	ErrIterReleased = errors.New("leveldb/iterator: iterator released")
+)
+
+// IteratorSeeker is the interface that wraps the 'seeks method'.
+type IteratorSeeker interface {
+	// First moves the iterator to the first key/value pair. If the iterator
+	// only contains one key/value pair then First and Last would move
+	// to the same key/value pair.
+	// It returns whether such a pair exists.
+	First() bool
+
+	// Last moves the iterator to the last key/value pair. If the iterator
+	// only contains one key/value pair then First and Last would move
+	// to the same key/value pair.
+	// It returns whether such a pair exists.
+	Last() bool
+
+	// Seek moves the iterator to the first key/value pair whose key is greater
+	// than or equal to the given key.
+	// It returns whether such a pair exists.
+	//
+	// It is safe to modify the contents of the argument after Seek returns.
+	Seek(key []byte) bool
+
+	// Next moves the iterator to the next key/value pair.
+	// It returns whether the iterator is exhausted.
+	Next() bool
+
+	// Prev moves the iterator to the previous key/value pair.
+	// It returns whether the iterator is exhausted.
+	Prev() bool
+}
+
+// CommonIterator is the interface that wraps common iterator methods.
+type CommonIterator interface {
+	IteratorSeeker
+
+	// util.Releaser is the interface that wraps basic Release method.
+	// When called Release will releases any resources associated with the
+	// iterator.
+	util.Releaser
+
+	// util.ReleaseSetter is the interface that wraps the basic SetReleaser
+	// method.
+	util.ReleaseSetter
+
+	// TODO: Remove this when ready.
+	Valid() bool
+
+	// Error returns any accumulated error. Exhausting all the key/value pairs
+	// is not considered to be an error.
+	Error() error
+}
+
+// Iterator iterates over a DB's key/value pairs in key order.
+//
+// When encountering an error any 'seeks method' will return false and will
+// yield no key/value pairs. The error can be queried by calling the Error
+// method. Calling Release is still necessary.
+//
+// An iterator must be released after use, but it is not necessary to read
+// an iterator until exhaustion.
+// Also, an iterator is not necessarily goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+type Iterator interface {
+	CommonIterator
+
+	// Key returns the key of the current key/value pair, or nil if done.
+	// The caller should not modify the contents of the returned slice, and
+	// its contents may change on the next call to any 'seeks method'.
+	Key() []byte
+
+	// Value returns the value of the current key/value pair, or nil if done.
+	// The caller should not modify the contents of the returned slice, and
+	// its contents may change on the next call to any 'seeks method'.
+	Value() []byte
+}
+
+// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback
+// method.
+//
+// ErrorCallbackSetter implemented by indexed and merged iterator.
+type ErrorCallbackSetter interface {
+	// SetErrorCallback allows setting an error callback for the corresponding
+	// iterator. Use nil to clear the callback.
+	SetErrorCallback(f func(err error))
+}
+
+type emptyIterator struct {
+	util.BasicReleaser
+	err error
+}
+
+func (i *emptyIterator) rErr() {
+	if i.err == nil && i.Released() {
+		i.err = ErrIterReleased
+	}
+}
+
+func (*emptyIterator) Valid() bool            { return false }
+func (i *emptyIterator) First() bool          { i.rErr(); return false }
+func (i *emptyIterator) Last() bool           { i.rErr(); return false }
+func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
+func (i *emptyIterator) Next() bool           { i.rErr(); return false }
+func (i *emptyIterator) Prev() bool           { i.rErr(); return false }
+func (*emptyIterator) Key() []byte            { return nil }
+func (*emptyIterator) Value() []byte          { return nil }
+func (i *emptyIterator) Error() error         { return i.err }
+
+// NewEmptyIterator creates an empty iterator. The err parameter can be
+// nil, but if not nil the given err will be returned by Error method.
+func NewEmptyIterator(err error) Iterator {
+	return &emptyIterator{err: err}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
new file mode 100644
index 0000000..0d73149
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
@@ -0,0 +1,11 @@
+package iterator_test
+
+import (
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+// TestIterator is the entry point that runs the ginkgo-based iterator
+// test suite under `go test`.
+func TestIterator(t *testing.T) {
+	testutil.RunSuite(t, "Iterator Suite")
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
new file mode 100644
index 0000000..b0c4bc3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type dir int
+
+const (
+	dirReleased dir = iota - 1
+	dirSOI
+	dirEOI
+	dirBackward
+	dirForward
+)
+
+type mergedIterator struct {
+	cmp    comparer.Comparer
+	iters  []Iterator
+	strict bool
+
+	keys     [][]byte
+	index    int
+	dir      dir
+	err      error
+	errf     func(err error)
+	releaser util.Releaser
+}
+
+// assertKey panics if key is nil: a positioned input iterator must
+// never report a nil key, so this guards against broken iterator
+// implementations feeding the merged iterator.
+func assertKey(key []byte) []byte {
+	if key == nil {
+		panic("leveldb/iterator: nil key")
+	}
+	return key
+}
+
+func (i *mergedIterator) iterErr(iter Iterator) bool {
+	if err := iter.Error(); err != nil {
+		if i.errf != nil {
+			i.errf(err)
+		}
+		if i.strict || !errors.IsCorrupted(err) {
+			i.err = err
+			return true
+		}
+	}
+	return false
+}
+
+func (i *mergedIterator) Valid() bool {
+	return i.err == nil && i.dir > dirEOI
+}
+
+func (i *mergedIterator) First() bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	for x, iter := range i.iters {
+		switch {
+		case iter.First():
+			i.keys[x] = assertKey(iter.Key())
+		case i.iterErr(iter):
+			return false
+		default:
+			i.keys[x] = nil
+		}
+	}
+	i.dir = dirSOI
+	return i.next()
+}
+
+func (i *mergedIterator) Last() bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	for x, iter := range i.iters {
+		switch {
+		case iter.Last():
+			i.keys[x] = assertKey(iter.Key())
+		case i.iterErr(iter):
+			return false
+		default:
+			i.keys[x] = nil
+		}
+	}
+	i.dir = dirEOI
+	return i.prev()
+}
+
+func (i *mergedIterator) Seek(key []byte) bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	for x, iter := range i.iters {
+		switch {
+		case iter.Seek(key):
+			i.keys[x] = assertKey(iter.Key())
+		case i.iterErr(iter):
+			return false
+		default:
+			i.keys[x] = nil
+		}
+	}
+	i.dir = dirSOI
+	return i.next()
+}
+
+func (i *mergedIterator) next() bool {
+	var key []byte
+	if i.dir == dirForward {
+		key = i.keys[i.index]
+	}
+	for x, tkey := range i.keys {
+		if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) {
+			key = tkey
+			i.index = x
+		}
+	}
+	if key == nil {
+		i.dir = dirEOI
+		return false
+	}
+	i.dir = dirForward
+	return true
+}
+
+func (i *mergedIterator) Next() bool {
+	if i.dir == dirEOI || i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	switch i.dir {
+	case dirSOI:
+		return i.First()
+	case dirBackward:
+		key := append([]byte{}, i.keys[i.index]...)
+		if !i.Seek(key) {
+			return false
+		}
+		return i.Next()
+	}
+
+	x := i.index
+	iter := i.iters[x]
+	switch {
+	case iter.Next():
+		i.keys[x] = assertKey(iter.Key())
+	case i.iterErr(iter):
+		return false
+	default:
+		i.keys[x] = nil
+	}
+	return i.next()
+}
+
+func (i *mergedIterator) prev() bool {
+	var key []byte
+	if i.dir == dirBackward {
+		key = i.keys[i.index]
+	}
+	for x, tkey := range i.keys {
+		if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) {
+			key = tkey
+			i.index = x
+		}
+	}
+	if key == nil {
+		i.dir = dirSOI
+		return false
+	}
+	i.dir = dirBackward
+	return true
+}
+
+func (i *mergedIterator) Prev() bool {
+	if i.dir == dirSOI || i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	switch i.dir {
+	case dirEOI:
+		return i.Last()
+	case dirForward:
+		key := append([]byte{}, i.keys[i.index]...)
+		for x, iter := range i.iters {
+			if x == i.index {
+				continue
+			}
+			seek := iter.Seek(key)
+			switch {
+			case seek && iter.Prev(), !seek && iter.Last():
+				i.keys[x] = assertKey(iter.Key())
+			case i.iterErr(iter):
+				return false
+			default:
+				i.keys[x] = nil
+			}
+		}
+	}
+
+	x := i.index
+	iter := i.iters[x]
+	switch {
+	case iter.Prev():
+		i.keys[x] = assertKey(iter.Key())
+	case i.iterErr(iter):
+		return false
+	default:
+		i.keys[x] = nil
+	}
+	return i.prev()
+}
+
+func (i *mergedIterator) Key() []byte {
+	if i.err != nil || i.dir <= dirEOI {
+		return nil
+	}
+	return i.keys[i.index]
+}
+
+func (i *mergedIterator) Value() []byte {
+	if i.err != nil || i.dir <= dirEOI {
+		return nil
+	}
+	return i.iters[i.index].Value()
+}
+
+func (i *mergedIterator) Release() {
+	if i.dir != dirReleased {
+		i.dir = dirReleased
+		for _, iter := range i.iters {
+			iter.Release()
+		}
+		i.iters = nil
+		i.keys = nil
+		if i.releaser != nil {
+			i.releaser.Release()
+			i.releaser = nil
+		}
+	}
+}
+
+func (i *mergedIterator) SetReleaser(releaser util.Releaser) {
+	if i.dir == dirReleased {
+		panic(util.ErrReleased)
+	}
+	if i.releaser != nil && releaser != nil {
+		panic(util.ErrHasReleaser)
+	}
+	i.releaser = releaser
+}
+
+func (i *mergedIterator) Error() error {
+	return i.err
+}
+
+func (i *mergedIterator) SetErrorCallback(f func(err error)) {
+	i.errf = f
+}
+
+// NewMergedIterator returns an iterator that merges its input. Walking the
+// resultant iterator will return all key/value pairs of all input iterators
+// in strictly increasing key order, as defined by cmp.
+// The input's key ranges may overlap, but there are assumed to be no duplicate
+// keys: if iters[i] contains a key k then iters[j] will not contain that key k.
+// None of the iters may be nil.
+//
+// If strict is true then any 'corruption errors' (i.e errors.IsCorrupted(err) == true)
+// won't be ignored and will halt 'merged iterator', otherwise the iterator will
+// continue to the next 'input iterator'.
+func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
+	return &mergedIterator{
+		iters:  iters,
+		cmp:    cmp,
+		strict: strict,
+		keys:   make([][]byte, len(iters)),
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go
new file mode 100644
index 0000000..a8a805a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	. "git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+var _ = testutil.Defer(func() {
+	Describe("Merged iterator", func() {
+		Test := func(filled int, empty int) func() {
+			return func() {
+				It("Should iterates and seeks correctly", func(done Done) {
+					rnd := testutil.NewRand()
+
+					// Build key/value.
+					filledKV := make([]testutil.KeyValue, filled)
+					kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4)
+					kv.Iterate(func(i int, key, value []byte) {
+						filledKV[rnd.Intn(filled)].Put(key, value)
+					})
+
+					// Create iterators.
+					iters := make([]Iterator, filled+empty)
+					for i := range iters {
+						if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) {
+							filled--
+							Expect(filledKV[filled].Len()).ShouldNot(BeZero())
+							iters[i] = NewArrayIterator(filledKV[filled])
+						} else {
+							empty--
+							iters[i] = NewEmptyIterator(nil)
+						}
+					}
+
+					// Test the iterator.
+					t := testutil.IteratorTesting{
+						KeyValue: kv.Clone(),
+						Iter:     NewMergedIterator(iters, comparer.DefaultComparer, true),
+					}
+					testutil.DoIteratorTesting(&t)
+					done <- true
+				}, 1.5)
+			}
+		}
+
+		Describe("with three, all filled iterators", Test(3, 0))
+		Describe("with one filled, one empty iterators", Test(1, 1))
+		Describe("with one filled, two empty iterators", Test(1, 2))
+	})
+})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
new file mode 100644
index 0000000..6b63b18
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
@@ -0,0 +1,520 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
+// License, authors and contributors information can be found at the below URLs respectively:
+// 	https://code.google.com/p/leveldb-go/source/browse/LICENSE
+//	https://code.google.com/p/leveldb-go/source/browse/AUTHORS
+//  https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
+
+// Package journal reads and writes sequences of journals. Each journal is a stream
+// of bytes that completes before the next journal starts.
+//
+// When reading, call Next to obtain an io.Reader for the next journal. Next will
+// return io.EOF when there are no more journals. It is valid to call Next
+// without reading the current journal to exhaustion.
+//
+// When writing, call Next to obtain an io.Writer for the next journal. Calling
+// Next finishes the current journal. Call Close to finish the final journal.
+//
+// Optionally, call Flush to finish the current journal and flush the underlying
+// writer without starting a new journal. To start a new journal after flushing,
+// call Next.
+//
+// Neither Readers nor Writers are safe to use concurrently.
+//
+// Example code:
+//	func read(r io.Reader) ([]string, error) {
+//		var ss []string
+//		journals := journal.NewReader(r, nil, true, true)
+//		for {
+//			j, err := journals.Next()
+//			if err == io.EOF {
+//				break
+//			}
+//			if err != nil {
+//				return nil, err
+//			}
+//			s, err := ioutil.ReadAll(j)
+//			if err != nil {
+//				return nil, err
+//			}
+//			ss = append(ss, string(s))
+//		}
+//		return ss, nil
+//	}
+//
+//	func write(w io.Writer, ss []string) error {
+//		journals := journal.NewWriter(w)
+//		for _, s := range ss {
+//			j, err := journals.Next()
+//			if err != nil {
+//				return err
+//			}
+//		if _, err := j.Write([]byte(s)); err != nil {
+//				return err
+//			}
+//		}
+//		return journals.Close()
+//	}
+//
+// The wire format is that the stream is divided into 32KiB blocks, and each
+// block contains a number of tightly packed chunks. Chunks cannot cross block
+// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
+// block must be zero.
+//
+// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4
+// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type)
+// followed by a payload. The checksum is over the chunk type and the payload.
+//
+// There are four chunk types: whether the chunk is the full journal, or the
+// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
+// has one first chunk, zero or more middle chunks, and one last chunk.
+//
+// The wire format allows for limited recovery in the face of data corruption:
+// on a format error (such as a checksum mismatch), the reader moves to the
+// next block and looks for the next full or first chunk.
+package journal
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
// These constants are part of the wire format and should not be changed.
const (
	fullChunkType   = 1 // chunk holds an entire journal
	firstChunkType  = 2 // first chunk of a multi-chunk journal
	middleChunkType = 3 // interior chunk of a multi-chunk journal
	lastChunkType   = 4 // final chunk of a multi-chunk journal
)

const (
	// blockSize is the physical layout unit; chunks never cross a block
	// boundary. headerSize is the per-chunk header: a 4-byte checksum, a
	// 2-byte little-endian length and a 1-byte chunk type.
	blockSize  = 32 * 1024
	headerSize = 7
)
+
// flusher is implemented by writers (such as bufio.Writer) whose buffered
// data can be explicitly flushed.
type flusher interface {
	Flush() error
}
+
// ErrCorrupted describes a corrupted block or chunk: the reason for the
// corruption and how many bytes were dropped because of it.
type ErrCorrupted struct {
	Size   int    // number of bytes dropped
	Reason string // human-readable cause
}

// Error implements the error interface.
func (e *ErrCorrupted) Error() string {
	const format = "leveldb/journal: block/chunk corrupted: %s (%d bytes)"
	return fmt.Sprintf(format, e.Reason, e.Size)
}
+
// Dropper is the interface that wraps the simple Drop method. The Drop
// method will be called whenever the journal reader drops a block or chunk.
type Dropper interface {
	Drop(err error)
}
+
// Reader reads journals from an underlying io.Reader.
type Reader struct {
	// r is the underlying reader.
	r io.Reader
	// the dropper; notified via Drop whenever a corrupted block or chunk
	// is skipped.
	dropper Dropper
	// strict flag; when set, corruption aborts reading instead of being
	// skipped.
	strict bool
	// checksum flag; when set, each chunk's CRC is verified.
	checksum bool
	// seq is the sequence number of the current journal.
	seq int
	// buf[i:j] is the unread portion of the current chunk's payload.
	// The low bound, i, excludes the chunk header.
	i, j int
	// n is the number of bytes of buf that are valid. Once reading has started,
	// only the final block can have n < blockSize.
	n int
	// last is whether the current chunk is the last chunk of the journal.
	last bool
	// err is any accumulated error.
	err error
	// buf is the buffer; exactly one block is held at a time.
	buf [blockSize]byte
}
+
+// NewReader returns a new reader. The dropper may be nil, and if
+// strict is true then corrupted or invalid chunk will halt the journal
+// reader entirely.
+func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
+	return &Reader{
+		r:        r,
+		dropper:  dropper,
+		strict:   strict,
+		checksum: checksum,
+		last:     true,
+	}
+}
+
// errSkip signals a recoverable corruption: the damaged chunk or block was
// reported to the dropper and reading may continue past it.
var errSkip = errors.New("leveldb/journal: skipped")

// corrupt reports n dropped bytes with the given reason. In strict mode
// (unless skip is set) the error is recorded in r.err and becomes fatal;
// otherwise errSkip is returned so callers can resynchronize.
func (r *Reader) corrupt(n int, reason string, skip bool) error {
	if r.dropper != nil {
		r.dropper.Drop(&ErrCorrupted{n, reason})
	}
	if r.strict && !skip {
		r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason})
		return r.err
	}
	return errSkip
}
+
// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
// next block into the buffer if necessary.
//
// first indicates the chunk should start a journal (a full or first chunk
// type); any other type found then is reported as an orphan and skipped.
// On corruption, r.i and r.j are advanced past the damaged region and
// r.corrupt decides whether the error is fatal (strict mode) or errSkip.
func (r *Reader) nextChunk(first bool) error {
	for {
		if r.j+headerSize <= r.n {
			// A whole header fits in the current block; decode it.
			checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
			length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
			chunkType := r.buf[r.j+6]

			if checksum == 0 && length == 0 && chunkType == 0 {
				// Drop entire block.
				m := r.n - r.j
				r.i = r.n
				r.j = r.n
				return r.corrupt(m, "zero header", false)
			} else {
				m := r.n - r.j
				r.i = r.j + headerSize
				r.j = r.j + headerSize + int(length)
				if r.j > r.n {
					// Drop entire block.
					r.i = r.n
					r.j = r.n
					return r.corrupt(m, "chunk length overflows block", false)
				} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
					// Drop entire block. Note that the checksum covers the
					// chunk type byte (at r.i-1) as well as the payload.
					r.i = r.n
					r.j = r.n
					return r.corrupt(m, "checksum mismatch", false)
				}
			}
			if first && chunkType != fullChunkType && chunkType != firstChunkType {
				// A middle/last chunk with no preceding first chunk.
				m := r.j - r.i
				r.i = r.j
				// Report the error, but skip it.
				return r.corrupt(m+headerSize, "orphan chunk", true)
			}
			r.last = chunkType == fullChunkType || chunkType == lastChunkType
			return nil
		}

		// The last block.
		if r.n < blockSize && r.n > 0 {
			// A short block was already read, so there is no more input.
			if !first {
				return r.corrupt(0, "missing chunk part", false)
			}
			r.err = io.EOF
			return r.err
		}

		// Read block.
		n, err := io.ReadFull(r.r, r.buf[:])
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return err
		}
		if n == 0 {
			if !first {
				return r.corrupt(0, "missing chunk part", false)
			}
			r.err = io.EOF
			return r.err
		}
		r.i, r.j, r.n = 0, 0, n
	}
}
+
// Next returns a reader for the next journal. It returns io.EOF if there are no
// more journals. The reader returned becomes stale after the next Next call,
// and should no longer be used. If strict is false, the reader will return
// io.ErrUnexpectedEOF when it encounters a corrupted journal.
func (r *Reader) Next() (io.Reader, error) {
	r.seq++
	if r.err != nil {
		return nil, r.err
	}
	// Discard the unread remainder of the current chunk.
	r.i = r.j
	// Skip corrupted chunks until a journal head is found (or a fatal error).
	for {
		if err := r.nextChunk(true); err == nil {
			break
		} else if err != errSkip {
			return nil, err
		}
	}
	return &singleReader{r, r.seq, nil}, nil
}
+
// Reset resets the journal reader, allowing reuse of the journal reader.
// Reset returns the last accumulated error.
func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
	// Bumping seq invalidates any outstanding singleReader.
	r.seq++
	err := r.err
	r.r = reader
	r.dropper = dropper
	r.strict = strict
	r.checksum = checksum
	r.i = 0
	r.j = 0
	r.n = 0
	r.last = true
	r.err = nil
	return err
}
+
// singleReader is the io.Reader returned by Reader.Next; it is valid only
// while its seq matches the parent Reader's.
type singleReader struct {
	r   *Reader
	seq int
	err error
}
+
// Read implements io.Reader. It returns io.EOF once the current journal is
// exhausted, and io.ErrUnexpectedEOF when a non-strict reader encounters a
// corrupted or truncated journal.
func (x *singleReader) Read(p []byte) (int, error) {
	r := x.r
	if r.seq != x.seq {
		// A later Next call has invalidated this reader.
		return 0, errors.New("leveldb/journal: stale reader")
	}
	if x.err != nil {
		return 0, x.err
	}
	if r.err != nil {
		return 0, r.err
	}
	// Advance to the next non-empty chunk, unless the journal has ended.
	for r.i == r.j {
		if r.last {
			return 0, io.EOF
		}
		x.err = r.nextChunk(false)
		if x.err != nil {
			if x.err == errSkip {
				x.err = io.ErrUnexpectedEOF
			}
			return 0, x.err
		}
	}
	n := copy(p, r.buf[r.i:r.j])
	r.i += n
	return n, nil
}
+
// ReadByte implements io.ByteReader; it mirrors Read for a single byte.
func (x *singleReader) ReadByte() (byte, error) {
	r := x.r
	if r.seq != x.seq {
		// A later Next call has invalidated this reader.
		return 0, errors.New("leveldb/journal: stale reader")
	}
	if x.err != nil {
		return 0, x.err
	}
	if r.err != nil {
		return 0, r.err
	}
	// Advance to the next non-empty chunk, unless the journal has ended.
	for r.i == r.j {
		if r.last {
			return 0, io.EOF
		}
		x.err = r.nextChunk(false)
		if x.err != nil {
			if x.err == errSkip {
				x.err = io.ErrUnexpectedEOF
			}
			return 0, x.err
		}
	}
	c := r.buf[r.i]
	r.i++
	return c, nil
}
+
// Writer writes journals to an underlying io.Writer.
type Writer struct {
	// w is the underlying writer.
	w io.Writer
	// seq is the sequence number of the current journal.
	seq int
	// f is w as a flusher (nil when w does not implement Flush).
	f flusher
	// buf[i:j] is the bytes that will become the current chunk.
	// The low bound, i, includes the chunk header.
	i, j int
	// buf[:written] has already been written to w.
	// written is zero unless Flush has been called.
	written int
	// first is whether the current chunk is the first chunk of the journal.
	first bool
	// pending is whether a chunk is buffered but not yet written.
	pending bool
	// err is any accumulated error.
	err error
	// buf is the buffer; exactly one block is held at a time.
	buf [blockSize]byte
}
+
+// NewWriter returns a new Writer.
+func NewWriter(w io.Writer) *Writer {
+	f, _ := w.(flusher)
+	return &Writer{
+		w: w,
+		f: f,
+	}
+}
+
// fillHeader fills in the header for the pending chunk at buf[i:i+headerSize].
// last reports whether this chunk ends its journal; together with w.first it
// selects one of the four chunk types.
func (w *Writer) fillHeader(last bool) {
	if w.i+headerSize > w.j || w.j > blockSize {
		panic("leveldb/journal: bad writer state")
	}
	if last {
		if w.first {
			w.buf[w.i+6] = fullChunkType
		} else {
			w.buf[w.i+6] = lastChunkType
		}
	} else {
		if w.first {
			w.buf[w.i+6] = firstChunkType
		} else {
			w.buf[w.i+6] = middleChunkType
		}
	}
	// The checksum covers the chunk type byte and the payload.
	binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value())
	binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize))
}
+
// writeBlock writes the buffered block to the underlying writer, and reserves
// space for the next chunk's header. Any write error is recorded in w.err
// rather than returned.
func (w *Writer) writeBlock() {
	_, w.err = w.w.Write(w.buf[w.written:])
	w.i = 0
	w.j = headerSize
	w.written = 0
}
+
// writePending finishes the current journal and writes the buffer to the
// underlying writer. Only the not-yet-written suffix buf[written:j] is
// emitted; errors are recorded in w.err.
func (w *Writer) writePending() {
	if w.err != nil {
		return
	}
	if w.pending {
		// Seal the buffered chunk as its journal's last chunk.
		w.fillHeader(true)
		w.pending = false
	}
	_, w.err = w.w.Write(w.buf[w.written:w.j])
	w.written = w.j
}
+
// Close finishes the current journal and closes the writer. Any further use
// of the Writer fails with the recorded "closed Writer" error.
func (w *Writer) Close() error {
	w.seq++
	w.writePending()
	if w.err != nil {
		return w.err
	}
	w.err = errors.New("leveldb/journal: closed Writer")
	return nil
}
+
// Flush finishes the current journal, writes to the underlying writer, and
// flushes it if that writer implements interface{ Flush() error }.
func (w *Writer) Flush() error {
	// Bumping seq invalidates any outstanding singleWriter.
	w.seq++
	w.writePending()
	if w.err != nil {
		return w.err
	}
	if w.f != nil {
		w.err = w.f.Flush()
		return w.err
	}
	return nil
}
+
// Reset resets the journal writer, allowing reuse of the journal writer.
// If the writer has not failed or been closed, Reset first finishes the
// pending journal, and returns any error from doing so.
func (w *Writer) Reset(writer io.Writer) (err error) {
	w.seq++
	if w.err == nil {
		w.writePending()
		err = w.err
	}
	w.w = writer
	w.f, _ = writer.(flusher)
	w.i = 0
	w.j = 0
	w.written = 0
	w.first = false
	w.pending = false
	w.err = nil
	return
}
+
// Next returns a writer for the next journal. The writer returned becomes stale
// after the next Close, Flush or Next call, and should no longer be used.
func (w *Writer) Next() (io.Writer, error) {
	w.seq++
	if w.err != nil {
		return nil, w.err
	}
	if w.pending {
		// Seal the previous journal's buffered chunk as its last.
		w.fillHeader(true)
	}
	w.i = w.j
	w.j = w.j + headerSize
	// Check if there is room in the block for the header.
	if w.j > blockSize {
		// Fill in the rest of the block with zeroes.
		for k := w.i; k < blockSize; k++ {
			w.buf[k] = 0
		}
		w.writeBlock()
		if w.err != nil {
			return nil, w.err
		}
	}
	w.first = true
	w.pending = true
	return singleWriter{w, w.seq}, nil
}
+
// singleWriter is the io.Writer returned by Writer.Next; it is valid only
// while its seq matches the parent Writer's.
type singleWriter struct {
	w   *Writer
	seq int
}
+
// Write appends p to the current journal, splitting it into chunks across
// block boundaries as needed. Data is buffered; it reaches the underlying
// writer only when a block fills, or on Flush/Close.
func (x singleWriter) Write(p []byte) (int, error) {
	w := x.w
	if w.seq != x.seq {
		// A later Next, Flush or Close call has invalidated this writer.
		return 0, errors.New("leveldb/journal: stale writer")
	}
	if w.err != nil {
		return 0, w.err
	}
	n0 := len(p)
	for len(p) > 0 {
		// Write a block, if it is full.
		if w.j == blockSize {
			w.fillHeader(false)
			w.writeBlock()
			if w.err != nil {
				return 0, w.err
			}
			// Following chunks continue the same journal.
			w.first = false
		}
		// Copy bytes into the buffer.
		n := copy(w.buf[w.j:], p)
		w.j += n
		p = p[n:]
	}
	return n0, nil
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
new file mode 100644
index 0000000..0fcf225
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
@@ -0,0 +1,818 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135
+// License, authors and contributors information can be found at the below URLs respectively:
+// 	https://code.google.com/p/leveldb-go/source/browse/LICENSE
+//	https://code.google.com/p/leveldb-go/source/browse/AUTHORS
+//  https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
+
+package journal
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"strings"
+	"testing"
+)
+
// dropper forwards dropped-chunk errors reported by the Reader to the test log.
type dropper struct {
	t *testing.T
}

func (d dropper) Drop(err error) {
	d.t.Log(err)
}
+
// short returns s unchanged when it is below 64 bytes long; otherwise it keeps
// the first and last 20 bytes and notes how many bytes were elided in between.
func short(s string) string {
	const limit = 64
	if len(s) >= limit {
		return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:])
	}
	return s
}
+
// big returns a string of length n, composed of repetitions of partial.
func big(partial string, n int) string {
	// Repeat one more time than strictly needed, then trim to exactly n.
	reps := n/len(partial) + 1
	return strings.Repeat(partial, reps)[:n]
}
+
+func TestEmpty(t *testing.T) {
+	buf := new(bytes.Buffer)
+	r := NewReader(buf, dropper{t}, true, true)
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("got %v, want %v", err, io.EOF)
+	}
+}
+
// testGenerator writes every string produced by gen as one journal, then
// reads them all back and checks that each round-trips. reset must rewind
// gen so the write pass and the read pass see the same sequence.
func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) {
	buf := new(bytes.Buffer)

	// Write pass.
	reset()
	w := NewWriter(buf)
	for {
		s, ok := gen()
		if !ok {
			break
		}
		ww, err := w.Next()
		if err != nil {
			t.Fatal(err)
		}
		if _, err := ww.Write([]byte(s)); err != nil {
			t.Fatal(err)
		}
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// Read pass.
	reset()
	r := NewReader(buf, dropper{t}, true, true)
	for {
		s, ok := gen()
		if !ok {
			break
		}
		rr, err := r.Next()
		if err != nil {
			t.Fatal(err)
		}
		x, err := ioutil.ReadAll(rr)
		if err != nil {
			t.Fatal(err)
		}
		if string(x) != s {
			t.Fatalf("got %q, want %q", short(string(x)), short(s))
		}
	}
	// Once the generator is exhausted, the reader must be too.
	if _, err := r.Next(); err != io.EOF {
		t.Fatalf("got %v, want %v", err, io.EOF)
	}
}
+
+func testLiterals(t *testing.T, s []string) {
+	var i int
+	reset := func() {
+		i = 0
+	}
+	gen := func() (string, bool) {
+		if i == len(s) {
+			return "", false
+		}
+		i++
+		return s[i-1], true
+	}
+	testGenerator(t, reset, gen)
+}
+
+func TestMany(t *testing.T) {
+	const n = 1e5
+	var i int
+	reset := func() {
+		i = 0
+	}
+	gen := func() (string, bool) {
+		if i == n {
+			return "", false
+		}
+		i++
+		return fmt.Sprintf("%d.", i-1), true
+	}
+	testGenerator(t, reset, gen)
+}
+
// TestRandom round-trips 100 records of random size (up to two blocks plus
// change each). Because reset recreates the rand.Rand from the same seed,
// the write and read passes generate identical payloads.
func TestRandom(t *testing.T) {
	const n = 1e2
	var (
		i int
		r *rand.Rand
	)
	reset := func() {
		i, r = 0, rand.New(rand.NewSource(0))
	}
	gen := func() (string, bool) {
		if i == n {
			return "", false
		}
		i++
		return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true
	}
	testGenerator(t, reset, gen)
}
+
+func TestBasic(t *testing.T) {
+	testLiterals(t, []string{
+		strings.Repeat("a", 1000),
+		strings.Repeat("b", 97270),
+		strings.Repeat("c", 8000),
+	})
+}
+
+func TestBoundary(t *testing.T) {
+	for i := blockSize - 16; i < blockSize+16; i++ {
+		s0 := big("abcd", i)
+		for j := blockSize - 16; j < blockSize+16; j++ {
+			s1 := big("ABCDE", j)
+			testLiterals(t, []string{s0, s1})
+			testLiterals(t, []string{s0, "", s1})
+			testLiterals(t, []string{s0, "x", s1})
+		}
+	}
+}
+
// TestFlush verifies exactly how many bytes reach the underlying writer
// after each write and each explicit Flush.
func TestFlush(t *testing.T) {
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	// Write a couple of records. Everything should still be held
	// in the record.Writer buffer, so that buf.Len should be 0.
	w0, _ := w.Next()
	w0.Write([]byte("0"))
	w1, _ := w.Next()
	w1.Write([]byte("11"))
	if got, want := buf.Len(), 0; got != want {
		t.Fatalf("buffer length #0: got %d want %d", got, want)
	}
	// Flush the record.Writer buffer, which should yield 17 bytes.
	// 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes.
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if got, want := buf.Len(), 17; got != want {
		t.Fatalf("buffer length #1: got %d want %d", got, want)
	}
	// Do another write, one that isn't large enough to complete the block.
	// The write should not have flowed through to buf.
	w2, _ := w.Next()
	w2.Write(bytes.Repeat([]byte("2"), 10000))
	if got, want := buf.Len(), 17; got != want {
		t.Fatalf("buffer length #2: got %d want %d", got, want)
	}
	// Flushing should get us up to 10024 bytes written.
	// 10024 = 17 + 7 + 10000.
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if got, want := buf.Len(), 10024; got != want {
		t.Fatalf("buffer length #3: got %d want %d", got, want)
	}
	// Do a bigger write, one that completes the current block.
	// We should now have 32768 bytes (a complete block), without
	// an explicit flush.
	w3, _ := w.Next()
	w3.Write(bytes.Repeat([]byte("3"), 40000))
	if got, want := buf.Len(), 32768; got != want {
		t.Fatalf("buffer length #4: got %d want %d", got, want)
	}
	// Flushing should get us up to 50038 bytes written.
	// 50038 = 10024 + 2*7 + 40000. There are two headers because
	// the one record was split into two chunks.
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if got, want := buf.Len(), 50038; got != want {
		t.Fatalf("buffer length #5: got %d want %d", got, want)
	}
	// Check that reading those records give the right lengths.
	r := NewReader(buf, dropper{t}, true, true)
	wants := []int64{1, 2, 10000, 40000}
	for i, want := range wants {
		rr, _ := r.Next()
		n, err := io.Copy(ioutil.Discard, rr)
		if err != nil {
			t.Fatalf("read #%d: %v", i, err)
		}
		if n != want {
			t.Fatalf("read #%d: got %d bytes want %d", i, n, want)
		}
	}
}
+
// TestNonExhaustiveRead writes n records but reads back only the first 10
// bytes of each, checking that Next correctly skips the unread remainder of
// the current journal.
func TestNonExhaustiveRead(t *testing.T) {
	const n = 100
	buf := new(bytes.Buffer)
	p := make([]byte, 10)
	rnd := rand.New(rand.NewSource(1))

	w := NewWriter(buf)
	for i := 0; i < n; i++ {
		length := len(p) + rnd.Intn(3*blockSize)
		s := string(uint8(i)) + "123456789abcdefgh"
		ww, _ := w.Next()
		ww.Write([]byte(big(s, length)))
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	r := NewReader(buf, dropper{t}, true, true)
	for i := 0; i < n; i++ {
		rr, _ := r.Next()
		_, err := io.ReadFull(rr, p)
		if err != nil {
			t.Fatal(err)
		}
		want := string(uint8(i)) + "123456789"
		if got := string(p); got != want {
			t.Fatalf("read #%d: got %q want %q", i, got, want)
		}
	}
}
+
// TestStaleReader checks that a reader obtained from Next is invalidated by
// a subsequent Next call and fails with a "stale" error, while the freshly
// returned reader still works.
func TestStaleReader(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)
	w0, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	w0.Write([]byte("0"))
	w1, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	w1.Write([]byte("11"))
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	r := NewReader(buf, dropper{t}, true, true)
	r0, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	r1, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	p := make([]byte, 1)
	if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") {
		t.Fatalf("stale read #0: unexpected error: %v", err)
	}
	if _, err := r1.Read(p); err != nil {
		t.Fatalf("fresh read #1: got %v want nil error", err)
	}
	if p[0] != '1' {
		t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0])
	}
}
+
// TestStaleWriter checks that a writer obtained from Next is invalidated by
// a subsequent Next or Flush call and fails with a "stale" error.
func TestStaleWriter(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)
	w0, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	w1, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") {
		t.Fatalf("stale write #0: unexpected error: %v", err)
	}
	if _, err := w1.Write([]byte("11")); err != nil {
		t.Fatalf("fresh write #1: got %v want nil error", err)
	}
	if err := w.Flush(); err != nil {
		t.Fatalf("flush: %v", err)
	}
	if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") {
		t.Fatalf("stale write #1: unexpected error: %v", err)
	}
}
+
+func TestCorrupt_MissingLastBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Cut the last block.
+	b := buf.Bytes()[:blockSize]
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read.
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if n != blockSize-1024 {
+		t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
+	}
+
+	// Second read.
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #1: unexpected error: %v", err)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	// Fourth record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+		t.Fatalf("write #3: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting block #0.
+	for i := 0; i < 1024; i++ {
+		b[i] = '1'
+	}
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (third record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (fourth record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 2; n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	// Fourth record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+		t.Fatalf("write #3: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting block #1.
+	for i := 0; i < 1024; i++ {
+		b[blockSize+i] = '1'
+	}
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (second record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #1: unexpected error: %v", err)
+	}
+
+	// Third read (fourth record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #2: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 2; n != want {
+		t.Fatalf("read #2: got %d bytes want %d", n, want)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_CorruptedLastBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	// Fourth record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+		t.Fatalf("write #3: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting block #3.
+	for i := len(b) - 1; i > len(b)-1024; i-- {
+		b[i] = '1'
+	}
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (second record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize - headerSize); n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	// Third read (third record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #2: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #2: got %d bytes want %d", n, want)
+	}
+
+	// Fourth read (fourth record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #3: unexpected error: %v", err)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting record #1.
+	x := blockSize
+	binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (second record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #1: unexpected error: %v", err)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting record #1.
+	x := blockSize/2 + headerSize
+	binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (third record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
new file mode 100644
index 0000000..6be715c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+)
+
+type ErrIkeyCorrupted struct {
+	Ikey   []byte
+	Reason string
+}
+
+func (e *ErrIkeyCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrIkeyCorrupted(ikey []byte, reason string) error {
+	return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type kType int
+
+func (kt kType) String() string {
+	switch kt {
+	case ktDel:
+		return "d"
+	case ktVal:
+		return "v"
+	}
+	return "x"
+}
+
+// Value types encoded as the last component of internal keys.
+// Don't modify; these values are saved to disk.
+const (
+	ktDel kType = iota
+	ktVal
+)
+
+// ktSeek defines the kType that should be passed when constructing an
+// internal key for seeking to a particular sequence number (since we
+// sort sequence numbers in decreasing order and the value type is
+// embedded as the low 8 bits in the sequence number in internal keys,
+// we need to use the highest-numbered ValueType, not the lowest).
+const ktSeek = ktVal
+
+const (
+	// Maximum value possible for a sequence number; the low 8 bits
+	// are used by the value type, so both can be packed together
+	// into a single 64-bit integer.
+	kMaxSeq uint64 = (uint64(1) << 56) - 1
+	// Maximum value possible for packed sequence number and type.
+	kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
+)
+
+// kMaxNumBytes is kMaxNum encoded as 8 little-endian bytes.
+var kMaxNumBytes = make([]byte, 8)
+
+func init() {
+	binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
+}
+
+type iKey []byte
+
+func newIkey(ukey []byte, seq uint64, kt kType) iKey {
+	if seq > kMaxSeq {
+		panic("leveldb: invalid sequence number")
+	} else if kt > ktVal {
+		panic("leveldb: invalid type")
+	}
+
+	ik := make(iKey, len(ukey)+8)
+	copy(ik, ukey)
+	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt))
+	return ik
+}
+
+func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
+	if len(ik) < 8 {
+		return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
+	}
+	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
+	seq, kt = uint64(num>>8), kType(num&0xff)
+	if kt > ktVal {
+		return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
+	}
+	ukey = ik[:len(ik)-8]
+	return
+}
+
+func validIkey(ik []byte) bool {
+	_, _, _, err := parseIkey(ik)
+	return err == nil
+}
+
+func (ik iKey) assert() {
+	if ik == nil {
+		panic("leveldb: nil iKey")
+	}
+	if len(ik) < 8 {
+		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
+	}
+}
+
+func (ik iKey) ukey() []byte {
+	ik.assert()
+	return ik[:len(ik)-8]
+}
+
+func (ik iKey) num() uint64 {
+	ik.assert()
+	return binary.LittleEndian.Uint64(ik[len(ik)-8:])
+}
+
+func (ik iKey) parseNum() (seq uint64, kt kType) {
+	num := ik.num()
+	seq, kt = uint64(num>>8), kType(num&0xff)
+	if kt > ktVal {
+		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+	}
+	return
+}
+
+func (ik iKey) String() string {
+	if ik == nil {
+		return "<nil>"
+	}
+
+	if ukey, seq, kt, err := parseIkey(ik); err == nil {
+		return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
+	} else {
+		return "<invalid>"
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go
new file mode 100644
index 0000000..81ea25d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go
@@ -0,0 +1,133 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bytes"
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+)
+
+var defaultIComparer = &iComparer{comparer.DefaultComparer}
+
+func ikey(key string, seq uint64, kt kType) iKey {
+	return newIkey([]byte(key), uint64(seq), kt)
+}
+
+func shortSep(a, b []byte) []byte {
+	dst := make([]byte, len(a))
+	dst = defaultIComparer.Separator(dst[:0], a, b)
+	if dst == nil {
+		return a
+	}
+	return dst
+}
+
+func shortSuccessor(b []byte) []byte {
+	dst := make([]byte, len(b))
+	dst = defaultIComparer.Successor(dst[:0], b)
+	if dst == nil {
+		return b
+	}
+	return dst
+}
+
+func testSingleKey(t *testing.T, key string, seq uint64, kt kType) {
+	ik := ikey(key, seq, kt)
+
+	if !bytes.Equal(ik.ukey(), []byte(key)) {
+		t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
+	}
+
+	rseq, rt := ik.parseNum()
+	if rseq != seq {
+		t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
+	}
+	if rt != kt {
+		t.Errorf("type does not equal, got %v, want %v", rt, kt)
+	}
+
+	if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil {
+		if !bytes.Equal(rukey, []byte(key)) {
+			t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
+		}
+		if rseq != seq {
+			t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
+		}
+		if rt != kt {
+			t.Errorf("type does not equal, got %v, want %v", rt, kt)
+		}
+	} else {
+		t.Errorf("key error: %v", kerr)
+	}
+}
+
+func TestIkey_EncodeDecode(t *testing.T) {
+	keys := []string{"", "k", "hello", "longggggggggggggggggggggg"}
+	seqs := []uint64{
+		1, 2, 3,
+		(1 << 8) - 1, 1 << 8, (1 << 8) + 1,
+		(1 << 16) - 1, 1 << 16, (1 << 16) + 1,
+		(1 << 32) - 1, 1 << 32, (1 << 32) + 1,
+	}
+	for _, key := range keys {
+		for _, seq := range seqs {
+			testSingleKey(t, key, seq, ktVal)
+			testSingleKey(t, "hello", 1, ktDel)
+		}
+	}
+}
+
+func assertBytes(t *testing.T, want, got []byte) {
+	if !bytes.Equal(got, want) {
+		t.Errorf("assert failed, got %v, want %v", got, want)
+	}
+}
+
+func TestIkeyShortSeparator(t *testing.T) {
+	// When user keys are same
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foo", 99, ktVal)))
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foo", 101, ktVal)))
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foo", 100, ktVal)))
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foo", 100, ktDel)))
+
+	// When user keys are misordered
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("bar", 99, ktVal)))
+
+	// When user keys are different, but correctly ordered
+	assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("hello", 200, ktVal)))
+
+	// When start user key is prefix of limit user key
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foobar", 200, ktVal)))
+
+	// When limit user key is prefix of start user key
+	assertBytes(t, ikey("foobar", 100, ktVal),
+		shortSep(ikey("foobar", 100, ktVal),
+			ikey("foo", 200, ktVal)))
+}
+
+func TestIkeyShortestSuccessor(t *testing.T) {
+	assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
+		shortSuccessor(ikey("foo", 100, ktVal)))
+	assertBytes(t, ikey("\xff\xff", 100, ktVal),
+		shortSuccessor(ikey("\xff\xff", 100, ktVal)))
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
new file mode 100644
index 0000000..69aa9d0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
@@ -0,0 +1,11 @@
+package leveldb
+
+import (
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+func TestLevelDB(t *testing.T) {
+	testutil.RunSuite(t, "LevelDB Suite")
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go
new file mode 100644
index 0000000..82270f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package memdb
+
+import (
+	"encoding/binary"
+	"math/rand"
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+)
+
+func BenchmarkPut(b *testing.B) {
+	buf := make([][4]byte, b.N)
+	for i := range buf {
+		binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
+	}
+
+	b.ResetTimer()
+	p := New(comparer.DefaultComparer, 0)
+	for i := range buf {
+		p.Put(buf[i][:], nil)
+	}
+}
+
+func BenchmarkPutRandom(b *testing.B) {
+	buf := make([][4]byte, b.N)
+	for i := range buf {
+		binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int()))
+	}
+
+	b.ResetTimer()
+	p := New(comparer.DefaultComparer, 0)
+	for i := range buf {
+		p.Put(buf[i][:], nil)
+	}
+}
+
+func BenchmarkGet(b *testing.B) {
+	buf := make([][4]byte, b.N)
+	for i := range buf {
+		binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
+	}
+
+	p := New(comparer.DefaultComparer, 0)
+	for i := range buf {
+		p.Put(buf[i][:], nil)
+	}
+
+	b.ResetTimer()
+	for i := range buf {
+		p.Get(buf[i][:])
+	}
+}
+
+func BenchmarkGetRandom(b *testing.B) {
+	buf := make([][4]byte, b.N)
+	for i := range buf {
+		binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
+	}
+
+	p := New(comparer.DefaultComparer, 0)
+	for i := range buf {
+		p.Put(buf[i][:], nil)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		p.Get(buf[rand.Int()%b.N][:])
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
new file mode 100644
index 0000000..6055fce
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -0,0 +1,471 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package memdb provides in-memory key/value database implementation.
+package memdb
+
+import (
+	"math/rand"
+	"sync"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	ErrNotFound     = errors.ErrNotFound
+	ErrIterReleased = errors.New("leveldb/memdb: iterator released")
+)
+
+const tMaxHeight = 12
+
+type dbIter struct {
+	util.BasicReleaser
+	p          *DB
+	slice      *util.Range
+	node       int
+	forward    bool
+	key, value []byte
+	err        error
+}
+
+func (i *dbIter) fill(checkStart, checkLimit bool) bool {
+	if i.node != 0 {
+		n := i.p.nodeData[i.node]
+		m := n + i.p.nodeData[i.node+nKey]
+		i.key = i.p.kvData[n:m]
+		if i.slice != nil {
+			switch {
+			case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0:
+				fallthrough
+			case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0:
+				i.node = 0
+				goto bail
+			}
+		}
+		i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]]
+		return true
+	}
+bail:
+	i.key = nil
+	i.value = nil
+	return false
+}
+
+func (i *dbIter) Valid() bool {
+	return i.node != 0
+}
+
+func (i *dbIter) First() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	i.forward = true
+	i.p.mu.RLock()
+	defer i.p.mu.RUnlock()
+	if i.slice != nil && i.slice.Start != nil {
+		i.node, _ = i.p.findGE(i.slice.Start, false)
+	} else {
+		i.node = i.p.nodeData[nNext]
+	}
+	return i.fill(false, true)
+}
+
+func (i *dbIter) Last() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	i.forward = false
+	i.p.mu.RLock()
+	defer i.p.mu.RUnlock()
+	if i.slice != nil && i.slice.Limit != nil {
+		i.node = i.p.findLT(i.slice.Limit)
+	} else {
+		i.node = i.p.findLast()
+	}
+	return i.fill(true, false)
+}
+
+func (i *dbIter) Seek(key []byte) bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	i.forward = true
+	i.p.mu.RLock()
+	defer i.p.mu.RUnlock()
+	if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 {
+		key = i.slice.Start
+	}
+	i.node, _ = i.p.findGE(key, false)
+	return i.fill(false, true)
+}
+
+func (i *dbIter) Next() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.node == 0 {
+		if !i.forward {
+			return i.First()
+		}
+		return false
+	}
+	i.forward = true
+	i.p.mu.RLock()
+	defer i.p.mu.RUnlock()
+	i.node = i.p.nodeData[i.node+nNext]
+	return i.fill(false, true)
+}
+
+func (i *dbIter) Prev() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.node == 0 {
+		if i.forward {
+			return i.Last()
+		}
+		return false
+	}
+	i.forward = false
+	i.p.mu.RLock()
+	defer i.p.mu.RUnlock()
+	i.node = i.p.findLT(i.key)
+	return i.fill(true, false)
+}
+
+func (i *dbIter) Key() []byte {
+	return i.key
+}
+
+func (i *dbIter) Value() []byte {
+	return i.value
+}
+
+func (i *dbIter) Error() error { return i.err }
+
+func (i *dbIter) Release() {
+	if !i.Released() {
+		i.p = nil
+		i.node = 0
+		i.key = nil
+		i.value = nil
+		i.BasicReleaser.Release()
+	}
+}
+
+const (
+	nKV = iota
+	nKey
+	nVal
+	nHeight
+	nNext
+)
+
+// DB is an in-memory key/value database.
+type DB struct {
+	cmp comparer.BasicComparer
+	rnd *rand.Rand
+
+	mu     sync.RWMutex
+	kvData []byte
+	// Node data:
+	// [0]         : KV offset
+	// [1]         : Key length
+	// [2]         : Value length
+	// [3]         : Height
+	// [4..4+height): Next nodes
+	nodeData  []int
+	prevNode  [tMaxHeight]int
+	maxHeight int
+	n         int
+	kvSize    int
+}
+
+func (p *DB) randHeight() (h int) {
+	const branching = 4
+	h = 1
+	for h < tMaxHeight && p.rnd.Int()%branching == 0 {
+		h++
+	}
+	return
+}
+
+// Must hold RW-lock if prev == true, as it uses the shared prevNode slice.
+func (p *DB) findGE(key []byte, prev bool) (int, bool) {
+	node := 0
+	h := p.maxHeight - 1
+	for {
+		next := p.nodeData[node+nNext+h]
+		cmp := 1
+		if next != 0 {
+			o := p.nodeData[next]
+			cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key)
+		}
+		if cmp < 0 {
+			// Keep searching in this list
+			node = next
+		} else {
+			if prev {
+				p.prevNode[h] = node
+			} else if cmp == 0 {
+				return next, true
+			}
+			if h == 0 {
+				return next, cmp == 0
+			}
+			h--
+		}
+	}
+}
+
+func (p *DB) findLT(key []byte) int {
+	node := 0
+	h := p.maxHeight - 1
+	for {
+		next := p.nodeData[node+nNext+h]
+		o := p.nodeData[next]
+		if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 {
+			if h == 0 {
+				break
+			}
+			h--
+		} else {
+			node = next
+		}
+	}
+	return node
+}
+
+func (p *DB) findLast() int {
+	node := 0
+	h := p.maxHeight - 1
+	for {
+		next := p.nodeData[node+nNext+h]
+		if next == 0 {
+			if h == 0 {
+				break
+			}
+			h--
+		} else {
+			node = next
+		}
+	}
+	return node
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (p *DB) Put(key []byte, value []byte) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if node, exact := p.findGE(key, true); exact {
+		kvOffset := len(p.kvData)
+		p.kvData = append(p.kvData, key...)
+		p.kvData = append(p.kvData, value...)
+		p.nodeData[node] = kvOffset
+		m := p.nodeData[node+nVal]
+		p.nodeData[node+nVal] = len(value)
+		p.kvSize += len(value) - m
+		return nil
+	}
+
+	h := p.randHeight()
+	if h > p.maxHeight {
+		for i := p.maxHeight; i < h; i++ {
+			p.prevNode[i] = 0
+		}
+		p.maxHeight = h
+	}
+
+	kvOffset := len(p.kvData)
+	p.kvData = append(p.kvData, key...)
+	p.kvData = append(p.kvData, value...)
+	// Node
+	node := len(p.nodeData)
+	p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
+	for i, n := range p.prevNode[:h] {
+		m := n + nNext + i
+		p.nodeData = append(p.nodeData, p.nodeData[m])
+		p.nodeData[m] = node
+	}
+
+	p.kvSize += len(key) + len(value)
+	p.n++
+	return nil
+}
+
+// Delete deletes the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (p *DB) Delete(key []byte) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	node, exact := p.findGE(key, true)
+	if !exact {
+		return ErrNotFound
+	}
+
+	h := p.nodeData[node+nHeight]
+	for i, n := range p.prevNode[:h] {
+		m := n + nNext + i // nNext, not a magic "4": keep the offset tied to the node layout
+		p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i]
+	}
+
+	p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal]
+	p.n--
+	return nil
+}
+
+// Contains returns true if the given key is in the DB.
+//
+// It is safe to modify the contents of the arguments after Contains returns.
+func (p *DB) Contains(key []byte) bool {
+	p.mu.RLock()
+	_, exact := p.findGE(key, false)
+	p.mu.RUnlock()
+	return exact
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (p *DB) Get(key []byte) (value []byte, err error) {
+	p.mu.RLock()
+	if node, exact := p.findGE(key, false); exact {
+		o := p.nodeData[node] + p.nodeData[node+nKey]
+		value = p.kvData[o : o+p.nodeData[node+nVal]]
+	} else {
+		err = ErrNotFound
+	}
+	p.mu.RUnlock()
+	return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Find returns.
+func (p *DB) Find(key []byte) (rkey, value []byte, err error) {
+	p.mu.RLock()
+	if node, _ := p.findGE(key, false); node != 0 {
+		n := p.nodeData[node]
+		m := n + p.nodeData[node+nKey]
+		rkey = p.kvData[n:m]
+		value = p.kvData[m : m+p.nodeData[node+nVal]]
+	} else {
+		err = ErrNotFound
+	}
+	p.mu.RUnlock()
+	return
+}
+
+// NewIterator returns an iterator of the DB.
+// The returned iterator is not goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. However, the resultant key/value pairs are not guaranteed
+// to be a consistent snapshot of the DB at a particular point in time.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (p *DB) NewIterator(slice *util.Range) iterator.Iterator {
+	return &dbIter{p: p, slice: slice}
+}
+
+// Capacity returns keys/values buffer capacity.
+func (p *DB) Capacity() int {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return cap(p.kvData)
+}
+
+// Size returns the sum of key and value lengths. Note that deleted
+// key/value pairs will not be accounted for, but they will still consume
+// the buffer, since the buffer is append-only.
+func (p *DB) Size() int {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return p.kvSize
+}
+
+// Free returns the free space left in the keys/values buffer before it needs to grow.
+func (p *DB) Free() int {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return cap(p.kvData) - len(p.kvData)
+}
+
+// Len returns the number of entries in the DB.
+func (p *DB) Len() int {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return p.n
+}
+
+// Reset resets the DB to its initial empty state, allowing reuse of the buffer.
+func (p *DB) Reset() {
+	p.mu.Lock()
+	p.rnd = rand.New(rand.NewSource(0xdeadbeef))
+	p.maxHeight = 1
+	p.n = 0
+	p.kvSize = 0
+	p.kvData = p.kvData[:0]
+	p.nodeData = p.nodeData[:nNext+tMaxHeight]
+	p.nodeData[nKV] = 0
+	p.nodeData[nKey] = 0
+	p.nodeData[nVal] = 0
+	p.nodeData[nHeight] = tMaxHeight
+	for n := 0; n < tMaxHeight; n++ {
+		p.nodeData[nNext+n] = 0
+		p.prevNode[n] = 0
+	}
+	p.mu.Unlock()
+}
+
+// New creates a new initialized in-memory key/value DB. The capacity
+// is the initial key/value buffer capacity. The capacity is advisory,
+// not enforced.
+//
+// The returned DB instance is goroutine-safe.
+func New(cmp comparer.BasicComparer, capacity int) *DB {
+	p := &DB{
+		cmp:       cmp,
+		rnd:       rand.New(rand.NewSource(0xdeadbeef)),
+		maxHeight: 1,
+		kvData:    make([]byte, 0, capacity),
+		nodeData:  make([]int, nNext+tMaxHeight), // head node: 4 fields + tMaxHeight next pointers, as in Reset
+	}
+	p.nodeData[nHeight] = tMaxHeight
+	return p
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
new file mode 100644
index 0000000..18e96ba
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
@@ -0,0 +1,11 @@
+package memdb
+
+import (
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+func TestMemDB(t *testing.T) {
+	testutil.RunSuite(t, "MemDB Suite")
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
new file mode 100644
index 0000000..0cabd7c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
@@ -0,0 +1,135 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package memdb
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) {
+	p.mu.RLock()
+	if node := p.findLT(key); node != 0 {
+		n := p.nodeData[node]
+		m := n + p.nodeData[node+nKey]
+		rkey = p.kvData[n:m]
+		value = p.kvData[m : m+p.nodeData[node+nVal]]
+	} else {
+		err = ErrNotFound
+	}
+	p.mu.RUnlock()
+	return
+}
+
+func (p *DB) TestFindLast() (rkey, value []byte, err error) {
+	p.mu.RLock()
+	if node := p.findLast(); node != 0 {
+		n := p.nodeData[node]
+		m := n + p.nodeData[node+nKey]
+		rkey = p.kvData[n:m]
+		value = p.kvData[m : m+p.nodeData[node+nVal]]
+	} else {
+		err = ErrNotFound
+	}
+	p.mu.RUnlock()
+	return
+}
+
+func (p *DB) TestPut(key []byte, value []byte) error {
+	p.Put(key, value)
+	return nil
+}
+
+func (p *DB) TestDelete(key []byte) error {
+	p.Delete(key)
+	return nil
+}
+
+func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) {
+	return p.Find(key)
+}
+
+func (p *DB) TestGet(key []byte) (value []byte, err error) {
+	return p.Get(key)
+}
+
+func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator {
+	return p.NewIterator(slice)
+}
+
+var _ = testutil.Defer(func() {
+	Describe("Memdb", func() {
+		Describe("write test", func() {
+			It("should do write correctly", func() {
+				db := New(comparer.DefaultComparer, 0)
+				t := testutil.DBTesting{
+					DB:      db,
+					Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(),
+					PostFn: func(t *testutil.DBTesting) {
+						Expect(db.Len()).Should(Equal(t.Present.Len()))
+						Expect(db.Size()).Should(Equal(t.Present.Size()))
+						switch t.Act {
+						case testutil.DBPut, testutil.DBOverwrite:
+							Expect(db.Contains(t.ActKey)).Should(BeTrue())
+						default:
+							Expect(db.Contains(t.ActKey)).Should(BeFalse())
+						}
+					},
+				}
+				testutil.DoDBTesting(&t)
+			})
+		})
+
+		Describe("read test", func() {
+			testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB {
+				// Building the DB.
+				db := New(comparer.DefaultComparer, 0)
+				kv.IterateShuffled(nil, func(i int, key, value []byte) {
+					db.Put(key, value)
+				})
+
+				if kv.Len() > 1 {
+					It("Should find correct keys with findLT", func() {
+						testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) {
+							key_, key, _ := kv.IndexInexact(i + 1)
+							expectedKey, expectedValue := kv.Index(i)
+
+							// Using key that exist.
+							rkey, rvalue, err := db.TestFindLT(key)
+							Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey)
+							Expect(rkey).Should(Equal(expectedKey), "Key")
+							Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey)
+
+							// Using key that doesn't exist.
+							rkey, rvalue, err = db.TestFindLT(key_)
+							Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey)
+							Expect(rkey).Should(Equal(expectedKey))
+							Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey)
+						})
+					})
+				}
+
+				if kv.Len() > 0 {
+					It("Should find last key with findLast", func() {
+						key, value := kv.Index(kv.Len() - 1)
+						rkey, rvalue, err := db.TestFindLast()
+						Expect(err).ShouldNot(HaveOccurred())
+						Expect(rkey).Should(Equal(key))
+						Expect(rvalue).Should(Equal(value))
+					})
+				}
+
+				return db
+			}, nil, nil)
+		})
+	})
+})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
new file mode 100644
index 0000000..52b50a5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -0,0 +1,682 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package opt provides sets of options used by LevelDB.
+package opt
+
+import (
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
+	"math"
+)
+
+// Common size units, in bytes.
+const (
+	KiB = 1024
+	MiB = KiB * 1024
+	GiB = MiB * 1024
+)
+
+// Package-level defaults substituted by the Options.Get* accessors below
+// whenever the corresponding Options field is left at its zero value.
+var (
+	DefaultBlockCacher                   = LRUCacher
+	DefaultBlockCacheCapacity            = 8 * MiB
+	DefaultBlockRestartInterval          = 16
+	DefaultBlockSize                     = 4 * KiB
+	DefaultCompactionExpandLimitFactor   = 25
+	DefaultCompactionGPOverlapsFactor    = 10
+	DefaultCompactionL0Trigger           = 4
+	DefaultCompactionSourceLimitFactor   = 1
+	DefaultCompactionTableSize           = 2 * MiB
+	DefaultCompactionTableSizeMultiplier = 1.0
+	DefaultCompactionTotalSize           = 10 * MiB
+	DefaultCompactionTotalSizeMultiplier = 10.0
+	DefaultCompressionType               = SnappyCompression
+	DefaultIteratorSamplingRate          = 1 * MiB
+	DefaultMaxMemCompationLevel          = 2
+	DefaultNumLevel                      = 7
+	DefaultOpenFilesCacher               = LRUCacher
+	DefaultOpenFilesCacheCapacity        = 500
+	DefaultWriteBuffer                   = 4 * MiB
+	DefaultWriteL0PauseTrigger           = 12
+	DefaultWriteL0SlowdownTrigger        = 8
+)
+
+// Cacher is a caching algorithm.
+type Cacher interface {
+	New(capacity int) cache.Cacher
+}
+
+// CacherFunc adapts a plain constructor function to the Cacher interface.
+type CacherFunc struct {
+	NewFunc func(capacity int) cache.Cacher
+}
+
+// New implements Cacher by delegating to NewFunc. It returns nil (caching
+// disabled) when NewFunc is unset, which is how NoCacher behaves.
+func (f *CacherFunc) New(capacity int) cache.Cacher {
+	if f.NewFunc != nil {
+		return f.NewFunc(capacity)
+	}
+	return nil
+}
+
+// noCacher is not referenced anywhere in this file; kept as vendored from
+// upstream goleveldb.
+func noCacher(int) cache.Cacher { return nil }
+
+var (
+	// LRUCacher is the LRU-cache algorithm.
+	LRUCacher = &CacherFunc{cache.NewLRU}
+
+	// NoCacher is the value to disable caching algorithm.
+	NoCacher = &CacherFunc{}
+)
+
+// Compression is the 'sorted table' block compression algorithm to use.
+type Compression uint
+
+// String returns a human-readable name for the compression algorithm,
+// or "invalid" for out-of-range values.
+func (c Compression) String() string {
+	switch c {
+	case DefaultCompression:
+		return "default"
+	case NoCompression:
+		return "none"
+	case SnappyCompression:
+		return "snappy"
+	}
+	return "invalid"
+}
+
+// Supported compression values. nCompression is the unexported upper
+// bound used for range validation in GetCompression.
+const (
+	DefaultCompression Compression = iota
+	NoCompression
+	SnappyCompression
+	nCompression
+)
+
+// Strict is the DB 'strict level'. It is a bit set; combine flags with
+// bitwise OR and test them with Options.GetStrict / GetStrict.
+type Strict uint
+
+const (
+	// If present then a corrupted or invalid chunk or block in manifest
+	// journal will cause an error instead of being dropped.
+	// This will prevent a database with a corrupted manifest from being opened.
+	StrictManifest Strict = 1 << iota
+
+	// If present then journal chunk checksum will be verified.
+	StrictJournalChecksum
+
+	// If present then a corrupted or invalid chunk or block in journal
+	// will cause an error instead of being dropped.
+	// This will prevent a database with a corrupted journal from being opened.
+	StrictJournal
+
+	// If present then 'sorted table' block checksum will be verified.
+	// This has effect on both 'read operation' and compaction.
+	StrictBlockChecksum
+
+	// If present then a corrupted 'sorted table' will fail compaction.
+	// The database will enter read-only mode.
+	StrictCompaction
+
+	// If present then a corrupted 'sorted table' will halt 'read operation'.
+	StrictReader
+
+	// If present then leveldb.Recover will drop corrupted 'sorted table'.
+	StrictRecovery
+
+	// This is only applicable to ReadOptions; if present then this ReadOptions
+	// 'strict level' will override global ones.
+	StrictOverride
+
+	// StrictAll enables all strict flags.
+	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery
+
+	// DefaultStrict is the default strict flags. Specify any strict flags
+	// will override default strict flags as whole (i.e. not OR'ed).
+	DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader
+
+	// NoStrict disables all strict flags. Override default strict flags.
+	NoStrict = ^StrictAll
+)
+
+// Options holds the optional parameters for the DB at large.
+type Options struct {
+	// AltFilters defines one or more 'alternative filters'.
+	// 'alternative filters' will be used during reads if a filter block
+	// does not match with the 'effective filter'.
+	//
+	// The default value is nil
+	AltFilters []filter.Filter
+
+	// BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
+	// Specify NoCacher to disable caching algorithm.
+	//
+	// The default value is LRUCacher.
+	BlockCacher Cacher
+
+	// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
+	// Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher.
+	//
+	// The default value is 8MiB.
+	BlockCacheCapacity int
+
+	// BlockRestartInterval is the number of keys between restart points for
+	// delta encoding of keys.
+	//
+	// The default value is 16.
+	BlockRestartInterval int
+
+	// BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
+	// block.
+	//
+	// The default value is 4KiB.
+	BlockSize int
+
+	// CompactionExpandLimitFactor limits compaction size after expanded.
+	// This will be multiplied by table size limit at compaction target level.
+	//
+	// The default value is 25.
+	CompactionExpandLimitFactor int
+
+	// CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a
+	// single 'sorted table' generates.
+	// This will be multiplied by table size limit at grandparent level.
+	//
+	// The default value is 10.
+	CompactionGPOverlapsFactor int
+
+	// CompactionL0Trigger defines number of 'sorted table' at level-0 that will
+	// trigger compaction.
+	//
+	// The default value is 4.
+	CompactionL0Trigger int
+
+	// CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
+	// level-0.
+	// This will be multiplied by table size limit at compaction target level.
+	//
+	// The default value is 1.
+	CompactionSourceLimitFactor int
+
+	// CompactionTableSize limits size of 'sorted table' that compaction generates.
+	// The limits for each level will be calculated as:
+	//   CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
+	// The multiplier for each level can also be fine-tuned using CompactionTableSizeMultiplierPerLevel.
+	//
+	// The default value is 2MiB.
+	CompactionTableSize int
+
+	// CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
+	//
+	// The default value is 1.
+	CompactionTableSizeMultiplier float64
+
+	// CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
+	// CompactionTableSize.
+	// Use zero to skip a level.
+	//
+	// The default value is nil.
+	CompactionTableSizeMultiplierPerLevel []float64
+
+	// CompactionTotalSize limits total size of 'sorted table' for each level.
+	// The limits for each level will be calculated as:
+	//   CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
+	// The multiplier for each level can also be fine-tuned using
+	// CompactionTotalSizeMultiplierPerLevel.
+	//
+	// The default value is 10MiB.
+	CompactionTotalSize int
+
+	// CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
+	//
+	// The default value is 10.
+	CompactionTotalSizeMultiplier float64
+
+	// CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
+	// CompactionTotalSize.
+	// Use zero to skip a level.
+	//
+	// The default value is nil.
+	CompactionTotalSizeMultiplierPerLevel []float64
+
+	// Comparer defines a total ordering over the space of []byte keys: a 'less
+	// than' relationship. The same comparison algorithm must be used for reads
+	// and writes over the lifetime of the DB.
+	//
+	// The default value uses the same ordering as bytes.Compare.
+	Comparer comparer.Comparer
+
+	// Compression defines the 'sorted table' block compression to use.
+	//
+	// The default value (DefaultCompression) uses snappy compression.
+	Compression Compression
+
+	// DisableBufferPool allows disable use of util.BufferPool functionality.
+	//
+	// The default value is false.
+	DisableBufferPool bool
+
+	// DisableBlockCache allows disable use of cache.Cache functionality on
+	// 'sorted table' block.
+	//
+	// The default value is false.
+	DisableBlockCache bool
+
+	// DisableCompactionBackoff allows disable compaction retry backoff.
+	//
+	// The default value is false.
+	DisableCompactionBackoff bool
+
+	// ErrorIfExist defines whether an error should be returned if the DB
+	// already exists.
+	//
+	// The default value is false.
+	ErrorIfExist bool
+
+	// ErrorIfMissing defines whether an error should be returned if the DB is
+	// missing. If false then the database will be created if missing, otherwise
+	// an error will be returned.
+	//
+	// The default value is false.
+	ErrorIfMissing bool
+
+	// Filter defines an 'effective filter' to use. An 'effective filter'
+	// if defined will be used to generate per-table filter block.
+	// The filter name will be stored on disk.
+	// During reads LevelDB will try to find matching filter from
+	// 'effective filter' and 'alternative filters'.
+	//
+	// Filter can be changed after a DB has been created. It is recommended
+	// to put old filter to the 'alternative filters' to mitigate lack of
+	// filter during transition period.
+	//
+	// A filter is used to reduce disk reads when looking for a specific key.
+	//
+	// The default value is nil.
+	Filter filter.Filter
+
+	// IteratorSamplingRate defines approximate gap (in bytes) between read
+	// sampling of an iterator. The samples will be used to determine when
+	// compaction should be triggered.
+	//
+	// The default is 1MiB.
+	IteratorSamplingRate int
+
+	// MaxMemCompationLevel defines maximum level a newly compacted 'memdb'
+	// will be pushed into if it doesn't create overlap. This should be less than
+	// NumLevel. Use -1 for level-0.
+	//
+	// The default is 2.
+	MaxMemCompationLevel int
+
+	// NoSync allows completely disable fsync.
+	//
+	// The default is false.
+	NoSync bool
+
+	// NumLevel defines the number of database levels. The level shouldn't be
+	// changed between opens, or the database will panic.
+	//
+	// The default is 7.
+	NumLevel int
+
+	// OpenFilesCacher provides cache algorithm for open files caching.
+	// Specify NoCacher to disable caching algorithm.
+	//
+	// The default value is LRUCacher.
+	OpenFilesCacher Cacher
+
+	// OpenFilesCacheCapacity defines the capacity of the open files caching.
+	// Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher.
+	//
+	// The default value is 500.
+	OpenFilesCacheCapacity int
+
+	// If true then opens DB in read-only mode.
+	//
+	// The default value is false.
+	ReadOnly bool
+
+	// Strict defines the DB strict level.
+	Strict Strict
+
+	// WriteBuffer defines maximum size of a 'memdb' before flushed to
+	// 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk
+	// unsorted journal.
+	//
+	// LevelDB may hold up to two 'memdb' at the same time.
+	//
+	// The default value is 4MiB.
+	WriteBuffer int
+
+	// WriteL0PauseTrigger defines the number of 'sorted table' files at
+	// level-0 that will pause writes.
+	//
+	// The default value is 12.
+	WriteL0PauseTrigger int
+
+	// WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that
+	// will trigger write slowdown.
+	//
+	// The default value is 8.
+	WriteL0SlowdownTrigger int
+}
+
+// GetAltFilters returns AltFilters; safe to call on a nil receiver.
+func (o *Options) GetAltFilters() []filter.Filter {
+	if o == nil {
+		return nil
+	}
+	return o.AltFilters
+}
+
+// GetBlockCacher returns the effective block cacher: DefaultBlockCacher
+// when unset, or nil when explicitly set to NoCacher.
+func (o *Options) GetBlockCacher() Cacher {
+	if o == nil || o.BlockCacher == nil {
+		return DefaultBlockCacher
+	} else if o.BlockCacher == NoCacher {
+		return nil
+	}
+	return o.BlockCacher
+}
+
+// GetBlockCacheCapacity returns the block cache capacity; zero means the
+// default, and any negative value means no capacity (cache disabled).
+func (o *Options) GetBlockCacheCapacity() int {
+	if o == nil || o.BlockCacheCapacity == 0 {
+		return DefaultBlockCacheCapacity
+	} else if o.BlockCacheCapacity < 0 {
+		return 0
+	}
+	return o.BlockCacheCapacity
+}
+
+// GetBlockRestartInterval returns BlockRestartInterval, falling back to
+// the default for unset or non-positive values.
+func (o *Options) GetBlockRestartInterval() int {
+	if o == nil || o.BlockRestartInterval <= 0 {
+		return DefaultBlockRestartInterval
+	}
+	return o.BlockRestartInterval
+}
+
+// GetBlockSize returns BlockSize, falling back to the default for unset
+// or non-positive values.
+func (o *Options) GetBlockSize() int {
+	if o == nil || o.BlockSize <= 0 {
+		return DefaultBlockSize
+	}
+	return o.BlockSize
+}
+
+// GetCompactionExpandLimit returns the expanded-compaction size limit for
+// level: the factor times the table size limit of the target level
+// (level+1). GetCompactionTableSize is nil-receiver safe, so this is too.
+func (o *Options) GetCompactionExpandLimit(level int) int {
+	factor := DefaultCompactionExpandLimitFactor
+	if o != nil && o.CompactionExpandLimitFactor > 0 {
+		factor = o.CompactionExpandLimitFactor
+	}
+	return o.GetCompactionTableSize(level+1) * factor
+}
+
+// GetCompactionGPOverlaps returns the grandparent-overlap limit for
+// level: the factor times the table size limit at level+2.
+func (o *Options) GetCompactionGPOverlaps(level int) int {
+	factor := DefaultCompactionGPOverlapsFactor
+	if o != nil && o.CompactionGPOverlapsFactor > 0 {
+		factor = o.CompactionGPOverlapsFactor
+	}
+	return o.GetCompactionTableSize(level+2) * factor
+}
+
+// GetCompactionL0Trigger returns CompactionL0Trigger, falling back to the
+// default when unset.
+func (o *Options) GetCompactionL0Trigger() int {
+	if o == nil || o.CompactionL0Trigger == 0 {
+		return DefaultCompactionL0Trigger
+	}
+	return o.CompactionL0Trigger
+}
+
+// GetCompactionSourceLimit returns the compaction source size limit for
+// level: the factor times the table size limit of the target level
+// (level+1).
+func (o *Options) GetCompactionSourceLimit(level int) int {
+	factor := DefaultCompactionSourceLimitFactor
+	if o != nil && o.CompactionSourceLimitFactor > 0 {
+		factor = o.CompactionSourceLimitFactor
+	}
+	return o.GetCompactionTableSize(level+1) * factor
+}
+
+// GetCompactionTableSize returns the per-table size limit for level as
+// base * multiplier. The multiplier is resolved in priority order:
+// the per-level override, then the global multiplier raised to the
+// level-th power, then the package default. Nil-receiver safe.
+func (o *Options) GetCompactionTableSize(level int) int {
+	var (
+		base = DefaultCompactionTableSize
+		mult float64
+	)
+	if o != nil {
+		if o.CompactionTableSize > 0 {
+			base = o.CompactionTableSize
+		}
+		if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
+			mult = o.CompactionTableSizeMultiplierPerLevel[level]
+		} else if o.CompactionTableSizeMultiplier > 0 {
+			mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
+		}
+	}
+	if mult == 0 {
+		mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
+	}
+	return int(float64(base) * mult)
+}
+
+// GetCompactionTotalSize returns the total size limit for level, using
+// the same base/multiplier resolution as GetCompactionTableSize but over
+// the CompactionTotalSize* fields. Nil-receiver safe.
+func (o *Options) GetCompactionTotalSize(level int) int64 {
+	var (
+		base = DefaultCompactionTotalSize
+		mult float64
+	)
+	if o != nil {
+		if o.CompactionTotalSize > 0 {
+			base = o.CompactionTotalSize
+		}
+		if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
+			mult = o.CompactionTotalSizeMultiplierPerLevel[level]
+		} else if o.CompactionTotalSizeMultiplier > 0 {
+			mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
+		}
+	}
+	if mult == 0 {
+		mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
+	}
+	return int64(float64(base) * mult)
+}
+
+// GetComparer returns Comparer, or comparer.DefaultComparer when unset.
+func (o *Options) GetComparer() comparer.Comparer {
+	if o == nil || o.Comparer == nil {
+		return comparer.DefaultComparer
+	}
+	return o.Comparer
+}
+
+// GetCompression returns Compression, substituting the package default
+// for unset or out-of-range values.
+func (o *Options) GetCompression() Compression {
+	if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
+		return DefaultCompressionType
+	}
+	return o.Compression
+}
+
+// GetDisableBufferPool returns DisableBufferPool; nil-receiver safe.
+func (o *Options) GetDisableBufferPool() bool {
+	if o == nil {
+		return false
+	}
+	return o.DisableBufferPool
+}
+
+// GetDisableBlockCache returns DisableBlockCache; nil-receiver safe.
+func (o *Options) GetDisableBlockCache() bool {
+	if o == nil {
+		return false
+	}
+	return o.DisableBlockCache
+}
+
+// GetDisableCompactionBackoff returns DisableCompactionBackoff;
+// nil-receiver safe.
+func (o *Options) GetDisableCompactionBackoff() bool {
+	if o == nil {
+		return false
+	}
+	return o.DisableCompactionBackoff
+}
+
+// GetErrorIfExist returns ErrorIfExist; nil-receiver safe.
+func (o *Options) GetErrorIfExist() bool {
+	if o == nil {
+		return false
+	}
+	return o.ErrorIfExist
+}
+
+// GetErrorIfMissing returns ErrorIfMissing; nil-receiver safe.
+func (o *Options) GetErrorIfMissing() bool {
+	if o == nil {
+		return false
+	}
+	return o.ErrorIfMissing
+}
+
+// GetFilter returns Filter; nil-receiver safe.
+func (o *Options) GetFilter() filter.Filter {
+	if o == nil {
+		return nil
+	}
+	return o.Filter
+}
+
+// GetIteratorSamplingRate returns IteratorSamplingRate, falling back to
+// the default for unset or non-positive values.
+func (o *Options) GetIteratorSamplingRate() int {
+	if o == nil || o.IteratorSamplingRate <= 0 {
+		return DefaultIteratorSamplingRate
+	}
+	return o.IteratorSamplingRate
+}
+
+// GetMaxMemCompationLevel returns the maximum level a flushed memdb may
+// be placed at, clamped to NumLevel-1; -1 maps to level-0. The name's
+// "Compation" typo is part of the public API and is kept as vendored.
+// The o.GetNumLevel() calls below are safe even when o is nil because
+// GetNumLevel checks for a nil receiver itself.
+func (o *Options) GetMaxMemCompationLevel() int {
+	level := DefaultMaxMemCompationLevel
+	if o != nil {
+		if o.MaxMemCompationLevel > 0 {
+			level = o.MaxMemCompationLevel
+		} else if o.MaxMemCompationLevel < 0 {
+			level = 0
+		}
+	}
+	if level >= o.GetNumLevel() {
+		return o.GetNumLevel() - 1
+	}
+	return level
+}
+
+// GetNoSync returns NoSync; nil-receiver safe.
+func (o *Options) GetNoSync() bool {
+	if o == nil {
+		return false
+	}
+	return o.NoSync
+}
+
+// GetNumLevel returns NumLevel, falling back to the default for unset or
+// non-positive values.
+func (o *Options) GetNumLevel() int {
+	if o == nil || o.NumLevel <= 0 {
+		return DefaultNumLevel
+	}
+	return o.NumLevel
+}
+
+// GetOpenFilesCacher returns the effective open-files cacher:
+// DefaultOpenFilesCacher when unset, or nil for NoCacher.
+func (o *Options) GetOpenFilesCacher() Cacher {
+	if o == nil || o.OpenFilesCacher == nil {
+		return DefaultOpenFilesCacher
+	}
+	if o.OpenFilesCacher == NoCacher {
+		return nil
+	}
+	return o.OpenFilesCacher
+}
+
+// GetOpenFilesCacheCapacity returns the open-files cache capacity; zero
+// means the default, and any negative value means no capacity.
+func (o *Options) GetOpenFilesCacheCapacity() int {
+	if o == nil || o.OpenFilesCacheCapacity == 0 {
+		return DefaultOpenFilesCacheCapacity
+	} else if o.OpenFilesCacheCapacity < 0 {
+		return 0
+	}
+	return o.OpenFilesCacheCapacity
+}
+
+// GetReadOnly returns ReadOnly; nil-receiver safe.
+func (o *Options) GetReadOnly() bool {
+	if o == nil {
+		return false
+	}
+	return o.ReadOnly
+}
+
+// GetStrict reports whether the given strict flag is set, using
+// DefaultStrict when no flags were specified at all.
+func (o *Options) GetStrict(strict Strict) bool {
+	if o == nil || o.Strict == 0 {
+		return DefaultStrict&strict != 0
+	}
+	return o.Strict&strict != 0
+}
+
+// GetWriteBuffer returns WriteBuffer, falling back to the default for
+// unset or non-positive values.
+func (o *Options) GetWriteBuffer() int {
+	if o == nil || o.WriteBuffer <= 0 {
+		return DefaultWriteBuffer
+	}
+	return o.WriteBuffer
+}
+
+// GetWriteL0PauseTrigger returns WriteL0PauseTrigger, falling back to
+// the default when unset.
+func (o *Options) GetWriteL0PauseTrigger() int {
+	if o == nil || o.WriteL0PauseTrigger == 0 {
+		return DefaultWriteL0PauseTrigger
+	}
+	return o.WriteL0PauseTrigger
+}
+
+// GetWriteL0SlowdownTrigger returns WriteL0SlowdownTrigger, falling back
+// to the default when unset.
+func (o *Options) GetWriteL0SlowdownTrigger() int {
+	if o == nil || o.WriteL0SlowdownTrigger == 0 {
+		return DefaultWriteL0SlowdownTrigger
+	}
+	return o.WriteL0SlowdownTrigger
+}
+
+// ReadOptions holds the optional parameters for 'read operation'. The
+// 'read operation' includes Get, Find and NewIterator.
+type ReadOptions struct {
+	// DontFillCache defines whether block reads for this 'read operation'
+	// should be cached. If false then the block will be cached. This does
+	// not affect already cached blocks.
+	//
+	// The default value is false.
+	DontFillCache bool
+
+	// Strict will be OR'ed with global DB 'strict level' unless StrictOverride
+	// is present. Currently only StrictReader that has effect here.
+	Strict Strict
+}
+
+// GetDontFillCache returns DontFillCache; nil-receiver safe.
+func (ro *ReadOptions) GetDontFillCache() bool {
+	if ro == nil {
+		return false
+	}
+	return ro.DontFillCache
+}
+
+// GetStrict reports whether the given strict flag is set on these read
+// options alone; unlike Options.GetStrict there is no default fallback.
+func (ro *ReadOptions) GetStrict(strict Strict) bool {
+	if ro == nil {
+		return false
+	}
+	return ro.Strict&strict != 0
+}
+
+// WriteOptions holds the optional parameters for 'write operation'. The
+// 'write operation' includes Write, Put and Delete.
+type WriteOptions struct {
+	// Sync is whether to sync underlying writes from the OS buffer cache
+	// through to actual disk, if applicable. Setting Sync can result in
+	// slower writes.
+	//
+	// If false, and the machine crashes, then some recent writes may be lost.
+	// Note that if it is just the process that crashes (and the machine does
+	// not) then no writes will be lost.
+	//
+	// In other words, Sync being false has the same semantics as a write
+	// system call. Sync being true means write followed by fsync.
+	//
+	// The default value is false.
+	Sync bool
+}
+
+// GetSync returns Sync; nil-receiver safe.
+func (wo *WriteOptions) GetSync() bool {
+	if wo == nil {
+		return false
+	}
+	return wo.Sync
+}
+
+// GetStrict reports whether the given strict flag is effectively set for
+// a read, combining DB-level and per-read options: when the ReadOptions
+// carry StrictOverride they fully replace the DB-level flags; otherwise
+// the two sets are OR'ed. Both receivers may be nil.
+func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
+	if ro.GetStrict(StrictOverride) {
+		return ro.GetStrict(strict)
+	} else {
+		return o.GetStrict(strict) || ro.GetStrict(strict)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
new file mode 100644
index 0000000..9734e39
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
@@ -0,0 +1,92 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+// dupOptions returns a shallow copy of o (slice and pointer fields are
+// shared with the original), substituting opt.DefaultStrict when no
+// strict flags were set. A nil o yields a fresh zero-value Options with
+// default strictness.
+func dupOptions(o *opt.Options) *opt.Options {
+	newo := &opt.Options{}
+	if o != nil {
+		*newo = *o
+	}
+	if newo.Strict == 0 {
+		newo.Strict = opt.DefaultStrict
+	}
+	return newo
+}
+
+// setOptions installs a defensive copy of o as the session options,
+// wrapping the user-supplied comparer and filters in their internal-key
+// aware counterparts (iComparer/iFilter), then precomputes the per-level
+// compaction limits via cachedOptions.cache.
+func (s *session) setOptions(o *opt.Options) {
+	no := dupOptions(o)
+	// Alternative filters.
+	if filters := o.GetAltFilters(); len(filters) > 0 {
+		no.AltFilters = make([]filter.Filter, len(filters))
+		for i, filter := range filters {
+			no.AltFilters[i] = &iFilter{filter}
+		}
+	}
+	// Comparer.
+	s.icmp = &iComparer{o.GetComparer()}
+	no.Comparer = s.icmp
+	// Filter.
+	if filter := o.GetFilter(); filter != nil {
+		no.Filter = &iFilter{filter}
+	}
+
+	s.o = &cachedOptions{Options: no}
+	s.o.cache()
+}
+
+// cachedOptions wraps opt.Options and memoizes the per-level compaction
+// limits, whose computation otherwise involves math.Pow on every call.
+type cachedOptions struct {
+	*opt.Options
+
+	compactionExpandLimit []int
+	compactionGPOverlaps  []int
+	compactionSourceLimit []int
+	compactionTableSize   []int
+	compactionTotalSize   []int64
+}
+
+// cache precomputes all per-level values for every configured level; it
+// must be called before any of the GetCompaction* overrides below.
+func (co *cachedOptions) cache() {
+	numLevel := co.Options.GetNumLevel()
+
+	co.compactionExpandLimit = make([]int, numLevel)
+	co.compactionGPOverlaps = make([]int, numLevel)
+	co.compactionSourceLimit = make([]int, numLevel)
+	co.compactionTableSize = make([]int, numLevel)
+	co.compactionTotalSize = make([]int64, numLevel)
+
+	for level := 0; level < numLevel; level++ {
+		co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level)
+		co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level)
+		co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level)
+		co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level)
+		co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level)
+	}
+}
+
+// GetCompactionExpandLimit returns the precomputed value for level.
+func (co *cachedOptions) GetCompactionExpandLimit(level int) int {
+	return co.compactionExpandLimit[level]
+}
+
+// GetCompactionGPOverlaps returns the precomputed value for level.
+func (co *cachedOptions) GetCompactionGPOverlaps(level int) int {
+	return co.compactionGPOverlaps[level]
+}
+
+// GetCompactionSourceLimit returns the precomputed value for level.
+func (co *cachedOptions) GetCompactionSourceLimit(level int) int {
+	return co.compactionSourceLimit[level]
+}
+
+// GetCompactionTableSize returns the precomputed value for level.
+func (co *cachedOptions) GetCompactionTableSize(level int) int {
+	return co.compactionTableSize[level]
+}
+
+// GetCompactionTotalSize returns the precomputed value for level.
+func (co *cachedOptions) GetCompactionTotalSize(level int) int64 {
+	return co.compactionTotalSize[level]
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
new file mode 100644
index 0000000..153bf08
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
@@ -0,0 +1,211 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"sync"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// ErrManifestCorrupted describes a corrupted or inconsistent field found
+// while decoding the MANIFEST file.
+type ErrManifestCorrupted struct {
+	Field  string
+	Reason string
+}
+
+// Error implements the error interface.
+func (e *ErrManifestCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
+}
+
+// newErrManifestCorrupted wraps an ErrManifestCorrupted in an
+// errors.ErrCorrupted tagged with the offending manifest file.
+func newErrManifestCorrupted(f storage.File, field, reason string) error {
+	return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason})
+}
+
+// session represents a persistent database session.
+type session struct {
+	// Need 64-bit alignment.
+	stNextFileNum    uint64 // current unused file number
+	stJournalNum     uint64 // current journal file number; need external synchronization
+	stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb
+	stSeqNum         uint64 // last mem compacted seq; need external synchronization
+	stTempFileNum    uint64 // temp file number counter — presumably allocated/reused by helpers not visible here; confirm
+
+	stor     storage.Storage // backing storage
+	storLock util.Releaser   // storage lock acquired in newSession; released via release()
+	o        *cachedOptions  // effective options with precomputed per-level limits
+	icmp     *iComparer      // internal-key comparer wrapping the user comparer
+	tops     *tOps           // table operations
+
+	manifest       *journal.Writer
+	manifestWriter storage.Writer
+	manifestFile   storage.File
+
+	stCompPtrs []iKey   // compaction pointers; need external synchronization
+	stVersion  *version // current version
+	vmu        sync.Mutex // guards stVersion; see setVersion/version
+}
+
+// Creates new initialized session instance.
+// Acquires the storage lock (released via release(), not close()), wires
+// up options and table ops, and installs an empty initial version.
+func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
+	if stor == nil {
+		return nil, os.ErrInvalid
+	}
+	storLock, err := stor.Lock()
+	if err != nil {
+		// Named return: s is still nil here.
+		return
+	}
+	s = &session{
+		stor:       stor,
+		storLock:   storLock,
+		stCompPtrs: make([]iKey, o.GetNumLevel()),
+	}
+	s.setOptions(o)
+	s.tops = newTableOps(s)
+	s.setVersion(newVersion(s))
+	s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
+	return
+}
+
+// Close session.
+// Closes the table ops and the manifest writer and drops references.
+// Note the storage lock is NOT released here — see release().
+func (s *session) close() {
+	s.tops.close()
+	if s.manifest != nil {
+		s.manifest.Close()
+	}
+	if s.manifestWriter != nil {
+		s.manifestWriter.Close()
+	}
+	s.manifest = nil
+	s.manifestWriter = nil
+	s.manifestFile = nil
+	s.stVersion = nil
+}
+
+// Release session lock.
+func (s *session) release() {
+	s.storLock.Release()
+}
+
+// Create a new database session; need external synchronization.
+// A fresh manifest with no prior record marks a brand-new database.
+func (s *session) create() error {
+	// create manifest
+	return s.newManifest(nil, nil)
+}
+
+// Recover a database session; need external synchronization.
+//
+// Rebuilds the in-memory session state (version, compaction pointers,
+// file/journal/sequence numbers) by replaying every record in the
+// current MANIFEST file.
+func (s *session) recover() (err error) {
+	defer func() {
+		// Translate a plain "manifest not found" into a corruption error
+		// when other LevelDB files are present, so recovery tooling can
+		// tell an empty directory from a damaged database.
+		if os.IsNotExist(err) {
+			// Don't return os.ErrNotExist if the underlying storage contains
+			// other files that belong to LevelDB. So the DB won't get trashed.
+			if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 {
+				err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
+			}
+		}
+	}()
+
+	m, err := s.stor.GetManifest()
+	if err != nil {
+		return
+	}
+
+	reader, err := m.Open()
+	if err != nil {
+		return
+	}
+	defer reader.Close()
+
+	var (
+		// Options.
+		numLevel = s.o.GetNumLevel()
+		strict   = s.o.GetStrict(opt.StrictManifest)
+
+		jr      = journal.NewReader(reader, dropper{s, m}, strict, true)
+		rec     = &sessionRecord{}
+		staging = s.stVersion.newStaging()
+	)
+	// Replay manifest journal records one chunk at a time, accumulating
+	// table additions/deletions into the version staging area.
+	for {
+		var r io.Reader
+		r, err = jr.Next()
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+				break
+			}
+			return errors.SetFile(err, m)
+		}
+
+		err = rec.decode(r, numLevel)
+		if err == nil {
+			// save compact pointers
+			for _, r := range rec.compPtrs {
+				s.stCompPtrs[r.level] = iKey(r.ikey)
+			}
+			// commit record to version staging
+			staging.commit(rec)
+		} else {
+			err = errors.SetFile(err, m)
+			if strict || !errors.IsCorrupted(err) {
+				// In strict mode, or for non-corruption errors, abort.
+				return
+			} else {
+				// NOTE(review): err already went through SetFile above;
+				// the second SetFile here is presumably idempotent —
+				// confirm, but it is harmless for logging.
+				s.logf("manifest error: %v (skipped)", errors.SetFile(err, m))
+			}
+		}
+		// Table/pointer lists are cumulative per record; clear them so
+		// the next decoded record starts fresh.
+		rec.resetCompPtrs()
+		rec.resetAddedTables()
+		rec.resetDeletedTables()
+	}
+
+	// A usable manifest must identify the comparer and the latest
+	// file/journal/sequence numbers; anything missing is corruption.
+	switch {
+	case !rec.has(recComparer):
+		return newErrManifestCorrupted(m, "comparer", "missing")
+	case rec.comparer != s.icmp.uName():
+		return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
+	case !rec.has(recNextFileNum):
+		return newErrManifestCorrupted(m, "next-file-num", "missing")
+	case !rec.has(recJournalNum):
+		return newErrManifestCorrupted(m, "journal-file-num", "missing")
+	case !rec.has(recSeqNum):
+		return newErrManifestCorrupted(m, "seq-num", "missing")
+	}
+
+	s.manifestFile = m
+	s.setVersion(staging.finish())
+	s.setNextFileNum(rec.nextFileNum)
+	s.recordCommited(rec)
+	return nil
+}
+
+// Commit session; need external synchronization.
+//
+// Spawns a new version from record r, persists r to the manifest
+// (creating a fresh manifest file on first use), and installs the new
+// version only if the manifest write succeeded.
+func (s *session) commit(r *sessionRecord) (err error) {
+	v := s.version()
+	defer v.release()
+
+	// spawn new version based on current version
+	nv := v.spawn(r)
+
+	if s.manifest == nil {
+		// manifest journal writer not yet created, create one
+		err = s.newManifest(r, nv)
+	} else {
+		err = s.flushManifest(r)
+	}
+
+	// finally, apply new version if no error rise
+	if err == nil {
+		s.setVersion(nv)
+	}
+
+	return
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
new file mode 100644
index 0000000..62c3cbc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
@@ -0,0 +1,287 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"sync/atomic"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func (s *session) pickMemdbLevel(umin, umax []byte) int {
+	v := s.version()
+	defer v.release()
+	return v.pickMemdbLevel(umin, umax)
+}
+
+func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) {
+	// Create sorted table.
+	iter := mdb.NewIterator(nil)
+	defer iter.Release()
+	t, n, err := s.tops.createFrom(iter)
+	if err != nil {
+		return level, err
+	}
+
+	// Pick level and add to record.
+	if level < 0 {
+		level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey())
+	}
+	rec.addTableFile(level, t)
+
+	s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
+	return level, nil
+}
+
+// Pick a compaction based on current state; need external synchronization.
+func (s *session) pickCompaction() *compaction {
+	v := s.version()
+
+	var level int
+	var t0 tFiles
+	if v.cScore >= 1 {
+		level = v.cLevel
+		cptr := s.stCompPtrs[level]
+		tables := v.tables[level]
+		for _, t := range tables {
+			if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
+				t0 = append(t0, t)
+				break
+			}
+		}
+		if len(t0) == 0 {
+			t0 = append(t0, tables[0])
+		}
+	} else {
+		if p := atomic.LoadPointer(&v.cSeek); p != nil {
+			ts := (*tSet)(p)
+			level = ts.level
+			t0 = append(t0, ts.table)
+		} else {
+			v.release()
+			return nil
+		}
+	}
+
+	return newCompaction(s, v, level, t0)
+}
+
+// Create compaction from given level and range; need external synchronization.
+func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
+	v := s.version()
+
+	t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
+	if len(t0) == 0 {
+		v.release()
+		return nil
+	}
+
+	// Avoid compacting too much in one shot in case the range is large.
+	// But we cannot do this for level-0 since level-0 files can overlap
+	// and we must not pick one file and drop another older file if the
+	// two files overlap.
+	if level > 0 {
+		limit := uint64(v.s.o.GetCompactionSourceLimit(level))
+		total := uint64(0)
+		for i, t := range t0 {
+			total += t.size
+			if total >= limit {
+				s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
+				t0 = t0[:i+1]
+				break
+			}
+		}
+	}
+
+	return newCompaction(s, v, level, t0)
+}
+
// newCompaction builds a compaction over the t0 tables at the given
// level, expands the input set to cover whole user-key ranges, and
// snapshots the initial cursor state for later restore.
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
	c := &compaction{
		s:             s,
		v:             v,
		level:         level,
		tables:        [2]tFiles{t0, nil},
		maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
		tPtrs:         make([]int, s.o.GetNumLevel()),
	}
	c.expand()
	c.save()
	return c
}
+
// compaction represent a compaction state.
type compaction struct {
	s *session
	v *version // Version the compaction operates on; released via release().

	level         int      // Source level; output goes to level+1.
	tables        [2]tFiles // Input tables: [0] from level, [1] from level+1.
	maxGPOverlaps uint64   // Max bytes of grandparent overlap per output table.

	gp                tFiles // Grandparent (level+2) tables overlapping the inputs.
	gpi               int    // Cursor into gp for shouldStopBefore.
	seenKey           bool
	gpOverlappedBytes uint64
	imin, imax        iKey // Internal-key range covered by the inputs.
	tPtrs             []int // Per-level cursors used by baseLevelForKey.
	released          bool

	// Snapshot of the mutable cursors, taken by save() and rolled
	// back by restore().
	snapGPI               int
	snapSeenKey           bool
	snapGPOverlappedBytes uint64
	snapTPtrs             []int
}
+
// save snapshots the mutable compaction cursors so restore can roll
// back to this point.
func (c *compaction) save() {
	c.snapGPI = c.gpi
	c.snapSeenKey = c.seenKey
	c.snapGPOverlappedBytes = c.gpOverlappedBytes
	c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
}
+
// restore rolls the mutable compaction cursors back to the last save.
func (c *compaction) restore() {
	c.gpi = c.snapGPI
	c.seenKey = c.snapSeenKey
	c.gpOverlappedBytes = c.snapGPOverlappedBytes
	c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
}
+
// release releases the version held by the compaction; it is
// idempotent.
func (c *compaction) release() {
	if !c.released {
		c.released = true
		c.v.release()
	}
}
+
// Expand compacted tables; need external synchronization.
// Grows tables[0] (source level) and tables[1] (parent level) so the
// inputs cover whole user-key ranges, optionally widening the source
// set when that does not pull in extra parent tables, and computes the
// overlapping grandparent set.
func (c *compaction) expand() {
	limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
	vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]

	t0, t1 := c.tables[0], c.tables[1]
	imin, imax := t0.getRange(c.s.icmp)
	// We expand t0 here just incase ukey hop across tables.
	t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
	if len(t0) != len(c.tables[0]) {
		imin, imax = t0.getRange(c.s.icmp)
	}
	t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
	// Get entire range covered by compaction.
	amin, amax := append(t0, t1...).getRange(c.s.icmp)

	// See if we can grow the number of inputs in "level" without
	// changing the number of "level+1" files we pick up.
	if len(t1) > 0 {
		exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
		if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
			xmin, xmax := exp0.getRange(c.s.icmp)
			exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
			// Only accept the expansion if it leaves the parent set
			// unchanged.
			if len(exp1) == len(t1) {
				c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
					c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
				imin, imax = xmin, xmax
				t0, t1 = exp0, exp1
				amin, amax = append(t0, t1...).getRange(c.s.icmp)
			}
		}
	}

	// Compute the set of grandparent files that overlap this compaction
	// (parent == level+1; grandparent == level+2)
	if c.level+2 < c.s.o.GetNumLevel() {
		c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
	}

	c.tables[0], c.tables[1] = t0, t1
	c.imin, c.imax = imin, imax
}
+
+// Check whether compaction is trivial.
+func (c *compaction) trivial() bool {
+	return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
+}
+
// baseLevelForKey reports whether no level below the compaction output
// level (level+2 and deeper) contains a table whose range covers ukey.
// It relies on callers passing keys in increasing order: the tPtrs
// cursors only ever advance, making the scan amortized O(1) per key.
func (c *compaction) baseLevelForKey(ukey []byte) bool {
	for level, tables := range c.v.tables[c.level+2:] {
		for c.tPtrs[level] < len(tables) {
			t := tables[c.tPtrs[level]]
			if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
				// We've advanced far enough.
				if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
					// Key falls in this file's range, so definitely not base level.
					return false
				}
				break
			}
			c.tPtrs[level]++
		}
	}
	return true
}
+
// shouldStopBefore reports whether the current output table should be
// finished before writing ikey, so that no single output table overlaps
// too many grandparent tables (which would make its future compaction
// expensive). Keys must be supplied in increasing order.
func (c *compaction) shouldStopBefore(ikey iKey) bool {
	for ; c.gpi < len(c.gp); c.gpi++ {
		gp := c.gp[c.gpi]
		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
			break
		}
		// Only count overlap once at least one key has been emitted.
		if c.seenKey {
			c.gpOverlappedBytes += gp.size
		}
	}
	c.seenKey = true

	if c.gpOverlappedBytes > c.maxGPOverlaps {
		// Too much overlap for current output; start new output.
		c.gpOverlappedBytes = 0
		return true
	}
	return false
}
+
// Creates an iterator.
// The result merges all input tables: level-0 tables each get their own
// iterator (they may overlap), sorted levels use one indexed iterator.
func (c *compaction) newIterator() iterator.Iterator {
	// Creates iterator slice.
	icap := len(c.tables)
	if c.level == 0 {
		// Special case for level-0.
		icap = len(c.tables[0]) + 1
	}
	its := make([]iterator.Iterator, 0, icap)

	// Options.
	ro := &opt.ReadOptions{
		DontFillCache: true,
		Strict:        opt.StrictOverride,
	}
	strict := c.s.o.GetStrict(opt.StrictCompaction)
	if strict {
		ro.Strict |= opt.StrictReader
	}

	for i, tables := range c.tables {
		if len(tables) == 0 {
			continue
		}

		// Level-0 is not sorted and may overlaps each other.
		// (c.level+i maps tables[0] to the source level and tables[1]
		// to the parent level.)
		if c.level+i == 0 {
			for _, t := range tables {
				its = append(its, c.s.tops.newIterator(t, nil, ro))
			}
		} else {
			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
			its = append(its, it)
		}
	}

	return iterator.NewMergedIterator(its, c.s.icmp, strict)
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
new file mode 100644
index 0000000..3066c0c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -0,0 +1,311 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bufio"
+	"encoding/binary"
+	"io"
+	"strings"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+)
+
// byteReader is the reader capability required by decode: bulk reads
// plus single-byte reads for varint decoding.
type byteReader interface {
	io.Reader
	io.ByteReader
}
+
// These numbers are written to disk and should not be changed.
// Each constant tags a field in a manifest session record; the tag
// doubles as the field's bit position in sessionRecord.hasRec.
const (
	recComparer    = 1
	recJournalNum  = 2
	recNextFileNum = 3
	recSeqNum      = 4
	recCompPtr     = 5
	recDelTable    = 6
	recAddTable    = 7
	// 8 was used for large value refs
	recPrevJournalNum = 9
)
+
// cpRecord is a compaction-pointer entry: the key compaction last
// stopped at for a level.
type cpRecord struct {
	level int
	ikey  iKey
}
+
// atRecord describes a table file added to a level.
type atRecord struct {
	level int
	num   uint64
	size  uint64
	imin  iKey
	imax  iKey
}
+
// dtRecord describes a table file deleted from a level.
type dtRecord struct {
	level int
	num   uint64
}
+
// sessionRecord is one manifest entry: a set of tagged fields plus
// table additions/deletions and compaction pointers. hasRec is a
// bitmask of the rec* tags that are present.
type sessionRecord struct {
	hasRec         int
	comparer       string
	journalNum     uint64
	prevJournalNum uint64
	nextFileNum    uint64
	seqNum         uint64
	compPtrs       []cpRecord
	addedTables    []atRecord
	deletedTables  []dtRecord

	// scratch is a reusable varint encode buffer; err latches the
	// first encode/decode error so helpers can be chained.
	scratch [binary.MaxVarintLen64]byte
	err     error
}
+
+func (p *sessionRecord) has(rec int) bool {
+	return p.hasRec&(1<<uint(rec)) != 0
+}
+
// setComparer records the comparer name field.
func (p *sessionRecord) setComparer(name string) {
	p.hasRec |= 1 << recComparer
	p.comparer = name
}
+
// setJournalNum records the journal file number field.
func (p *sessionRecord) setJournalNum(num uint64) {
	p.hasRec |= 1 << recJournalNum
	p.journalNum = num
}
+
// setPrevJournalNum records the previous journal file number field.
func (p *sessionRecord) setPrevJournalNum(num uint64) {
	p.hasRec |= 1 << recPrevJournalNum
	p.prevJournalNum = num
}
+
// setNextFileNum records the next-file-number field.
func (p *sessionRecord) setNextFileNum(num uint64) {
	p.hasRec |= 1 << recNextFileNum
	p.nextFileNum = num
}
+
// setSeqNum records the sequence number field.
func (p *sessionRecord) setSeqNum(num uint64) {
	p.hasRec |= 1 << recSeqNum
	p.seqNum = num
}
+
// addCompPtr appends a compaction pointer for a level.
func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
	p.hasRec |= 1 << recCompPtr
	p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
}
+
// resetCompPtrs clears all compaction pointers and their presence bit.
func (p *sessionRecord) resetCompPtrs() {
	p.hasRec &= ^(1 << recCompPtr)
	p.compPtrs = p.compPtrs[:0]
}
+
// addTable appends an added-table entry.
func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) {
	p.hasRec |= 1 << recAddTable
	p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
}
+
// addTableFile appends an added-table entry derived from t.
func (p *sessionRecord) addTableFile(level int, t *tFile) {
	p.addTable(level, t.file.Num(), t.size, t.imin, t.imax)
}
+
// resetAddedTables clears all added-table entries and their presence bit.
func (p *sessionRecord) resetAddedTables() {
	p.hasRec &= ^(1 << recAddTable)
	p.addedTables = p.addedTables[:0]
}
+
// delTable appends a deleted-table entry.
func (p *sessionRecord) delTable(level int, num uint64) {
	p.hasRec |= 1 << recDelTable
	p.deletedTables = append(p.deletedTables, dtRecord{level, num})
}
+
// resetDeletedTables clears all deleted-table entries and their presence bit.
func (p *sessionRecord) resetDeletedTables() {
	p.hasRec &= ^(1 << recDelTable)
	p.deletedTables = p.deletedTables[:0]
}
+
// putUvarint writes x to w as an unsigned varint. The first error is
// latched in p.err; once set, subsequent calls are no-ops.
func (p *sessionRecord) putUvarint(w io.Writer, x uint64) {
	if p.err != nil {
		return
	}
	n := binary.PutUvarint(p.scratch[:], x)
	_, p.err = w.Write(p.scratch[:n])
}
+
// putBytes writes x to w as a length-prefixed (uvarint) byte string,
// latching the first error in p.err.
func (p *sessionRecord) putBytes(w io.Writer, x []byte) {
	if p.err != nil {
		return
	}
	p.putUvarint(w, uint64(len(x)))
	if p.err != nil {
		return
	}
	_, p.err = w.Write(x)
}
+
// encode writes all present fields of the record to w in the manifest
// wire format: each field is a uvarint tag followed by its payload.
// The emission order matches what decode accepts; do not reorder, this
// is an on-disk format.
func (p *sessionRecord) encode(w io.Writer) error {
	p.err = nil
	if p.has(recComparer) {
		p.putUvarint(w, recComparer)
		p.putBytes(w, []byte(p.comparer))
	}
	if p.has(recJournalNum) {
		p.putUvarint(w, recJournalNum)
		p.putUvarint(w, p.journalNum)
	}
	if p.has(recNextFileNum) {
		p.putUvarint(w, recNextFileNum)
		p.putUvarint(w, p.nextFileNum)
	}
	if p.has(recSeqNum) {
		p.putUvarint(w, recSeqNum)
		p.putUvarint(w, p.seqNum)
	}
	for _, r := range p.compPtrs {
		p.putUvarint(w, recCompPtr)
		p.putUvarint(w, uint64(r.level))
		p.putBytes(w, r.ikey)
	}
	for _, r := range p.deletedTables {
		p.putUvarint(w, recDelTable)
		p.putUvarint(w, uint64(r.level))
		p.putUvarint(w, r.num)
	}
	for _, r := range p.addedTables {
		p.putUvarint(w, recAddTable)
		p.putUvarint(w, uint64(r.level))
		p.putUvarint(w, r.num)
		p.putUvarint(w, r.size)
		p.putBytes(w, r.imin)
		p.putBytes(w, r.imax)
	}
	return p.err
}
+
// readUvarintMayEOF reads a uvarint, latching errors in p.err.
// When mayEOF is true a clean io.EOF is passed through unconverted
// (end of record stream); otherwise EOF and truncated varints are
// reported as manifest corruption for the named field.
func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF bool) uint64 {
	if p.err != nil {
		return 0
	}
	x, err := binary.ReadUvarint(r)
	if err != nil {
		if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) {
			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
		} else if strings.HasPrefix(err.Error(), "binary:") {
			// binary.ReadUvarint reports overflow with a "binary:"-prefixed
			// error; treat it as corruption too.
			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()})
		} else {
			p.err = err
		}
		return 0
	}
	return x
}
+
// readUvarint reads a uvarint field; EOF counts as corruption.
func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 {
	return p.readUvarintMayEOF(field, r, false)
}
+
// readBytes reads a length-prefixed byte string, latching errors in
// p.err; a short payload is reported as manifest corruption.
func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
	if p.err != nil {
		return nil
	}
	n := p.readUvarint(field, r)
	if p.err != nil {
		return nil
	}
	x := make([]byte, n)
	_, p.err = io.ReadFull(r, x)
	if p.err != nil {
		if p.err == io.ErrUnexpectedEOF {
			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
		}
		return nil
	}
	return x
}
+
// readLevel reads a level number and validates it against numLevel;
// out-of-range values are reported as manifest corruption.
func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int {
	if p.err != nil {
		return 0
	}
	x := p.readUvarint(field, r)
	if p.err != nil {
		return 0
	}
	if x >= uint64(numLevel) {
		p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
		return 0
	}
	return int(x)
}
+
// decode reads tagged fields from r until EOF, accumulating them into
// the record (it does not reset existing state first). Unknown tags
// are silently skipped tag-only — their payload bytes are NOT consumed,
// so a record containing an unknown tag with a payload will misparse;
// the rec* tag set is considered closed.
func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
	br, ok := r.(byteReader)
	if !ok {
		// Wrap plain readers so varint decoding gets ReadByte.
		br = bufio.NewReader(r)
	}
	p.err = nil
	for p.err == nil {
		// EOF is only legitimate at a field boundary.
		rec := p.readUvarintMayEOF("field-header", br, true)
		if p.err != nil {
			if p.err == io.EOF {
				return nil
			}
			return p.err
		}
		switch rec {
		case recComparer:
			x := p.readBytes("comparer", br)
			if p.err == nil {
				p.setComparer(string(x))
			}
		case recJournalNum:
			x := p.readUvarint("journal-num", br)
			if p.err == nil {
				p.setJournalNum(x)
			}
		case recPrevJournalNum:
			x := p.readUvarint("prev-journal-num", br)
			if p.err == nil {
				p.setPrevJournalNum(x)
			}
		case recNextFileNum:
			x := p.readUvarint("next-file-num", br)
			if p.err == nil {
				p.setNextFileNum(x)
			}
		case recSeqNum:
			x := p.readUvarint("seq-num", br)
			if p.err == nil {
				p.setSeqNum(x)
			}
		case recCompPtr:
			level := p.readLevel("comp-ptr.level", br, numLevel)
			ikey := p.readBytes("comp-ptr.ikey", br)
			if p.err == nil {
				p.addCompPtr(level, iKey(ikey))
			}
		case recAddTable:
			level := p.readLevel("add-table.level", br, numLevel)
			num := p.readUvarint("add-table.num", br)
			size := p.readUvarint("add-table.size", br)
			imin := p.readBytes("add-table.imin", br)
			imax := p.readBytes("add-table.imax", br)
			if p.err == nil {
				p.addTable(level, num, size, imin, imax)
			}
		case recDelTable:
			level := p.readLevel("del-table.level", br, numLevel)
			num := p.readUvarint("del-table.num", br)
			if p.err == nil {
				p.delTable(level, num)
			}
		}
	}

	return p.err
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
new file mode 100644
index 0000000..a2d4b3c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bytes"
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func decodeEncode(v *sessionRecord) (res bool, err error) {
+	b := new(bytes.Buffer)
+	err = v.encode(b)
+	if err != nil {
+		return
+	}
+	v2 := &sessionRecord{}
+	err = v.decode(b, opt.DefaultNumLevel)
+	if err != nil {
+		return
+	}
+	b2 := new(bytes.Buffer)
+	err = v2.encode(b2)
+	if err != nil {
+		return
+	}
+	return bytes.Equal(b.Bytes(), b2.Bytes()), nil
+}
+
// TestSessionRecord_EncodeDecode grows a sessionRecord incrementally
// with table additions/deletions and compaction pointers, then the
// scalar fields, round-tripping it through encode/decode at each step.
func TestSessionRecord_EncodeDecode(t *testing.T) {
	big := uint64(1) << 50
	v := &sessionRecord{}
	i := uint64(0)
	test := func() {
		res, err := decodeEncode(v)
		if err != nil {
			t.Fatalf("error when testing encode/decode sessionRecord: %v", err)
		}
		if !res {
			t.Error("encode/decode test failed at iteration:", i)
		}
	}

	for ; i < 4; i++ {
		test()
		// NOTE(review): the "+1" offsets below look like they may have
		// been intended as "+i" (cf. big+300+i, big+700+i); harmless
		// for a round-trip test, but confirm against upstream.
		v.addTable(3, big+300+i, big+400+i,
			newIkey([]byte("foo"), big+500+1, ktVal),
			newIkey([]byte("zoo"), big+600+1, ktDel))
		v.delTable(4, big+700+i)
		v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal))
	}

	v.setComparer("foo")
	v.setJournalNum(big + 100)
	v.setPrevJournalNum(big + 99)
	v.setNextFileNum(big + 200)
	v.setSeqNum(big + 1000)
	test()
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
new file mode 100644
index 0000000..0502582
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -0,0 +1,249 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+)
+
// dropper logs journal chunks dropped during recovery.
type dropper struct {
	s    *session // Logging.
	file storage.File
}
+
+func (d dropper) Drop(err error) {
+	if e, ok := err.(*journal.ErrCorrupted); ok {
+		d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
+	} else {
+		d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)
+	}
+}
+
// log and logf forward session log messages to the storage logger.
func (s *session) log(v ...interface{})                 { s.stor.Log(fmt.Sprint(v...)) }
func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
+
+// File utils.
+
// getJournalFile returns a handle for journal file number num.
func (s *session) getJournalFile(num uint64) storage.File {
	return s.stor.GetFile(num, storage.TypeJournal)
}
+
// getTableFile returns a handle for table file number num.
func (s *session) getTableFile(num uint64) storage.File {
	return s.stor.GetFile(num, storage.TypeTable)
}
+
// getFiles lists storage files matching the given type mask.
func (s *session) getFiles(t storage.FileType) ([]storage.File, error) {
	return s.stor.GetFiles(t)
}
+
// newTemp allocates a fresh temporary file handle using the atomic
// temp-file counter.
func (s *session) newTemp() storage.File {
	num := atomic.AddUint64(&s.stTempFileNum, 1) - 1
	return s.stor.GetFile(num, storage.TypeTemp)
}
+
// tableFileFromRecord materializes a tFile from an added-table record.
func (s *session) tableFileFromRecord(r atRecord) *tFile {
	return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax)
}
+
+// Session state.
+
// Get current version. This will incr version ref, must call
// version.release (exactly once) after use.
func (s *session) version() *version {
	s.vmu.Lock()
	defer s.vmu.Unlock()
	s.stVersion.ref++
	return s.stVersion
}
+
// Set current version to v.
// The old version is linked to the new one and its session reference
// dropped (non-blocking release), so it is freed once outstanding
// readers release it.
func (s *session) setVersion(v *version) {
	s.vmu.Lock()
	v.ref = 1 // Holds by session.
	if old := s.stVersion; old != nil {
		v.ref++ // Holds by old version.
		old.next = v
		old.releaseNB()
	}
	s.stVersion = v
	s.vmu.Unlock()
}
+
// Get current unused file number.
func (s *session) nextFileNum() uint64 {
	return atomic.LoadUint64(&s.stNextFileNum)
}
+
// Set current unused file number to num.
func (s *session) setNextFileNum(num uint64) {
	atomic.StoreUint64(&s.stNextFileNum, num)
}
+
+// Mark file number as used.
+func (s *session) markFileNum(num uint64) {
+	nextFileNum := num + 1
+	for {
+		old, x := s.stNextFileNum, nextFileNum
+		if old > x {
+			x = old
+		}
+		if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
+			break
+		}
+	}
+}
+
// Allocate a file number.
func (s *session) allocFileNum() uint64 {
	return atomic.AddUint64(&s.stNextFileNum, 1) - 1
}
+
+// Reuse given file number.
+func (s *session) reuseFileNum(num uint64) {
+	for {
+		old, x := s.stNextFileNum, num
+		if old != x+1 {
+			x = old
+		}
+		if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
+			break
+		}
+	}
+}
+
+// Manifest related utils.
+
// Fill given session record obj with current states; need external
// synchronization.
// The next-file-number is always refreshed; with snapshot set, missing
// journal/seq fields, all compaction pointers and the comparer name are
// filled in as well (used when writing a brand-new manifest).
func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
	r.setNextFileNum(s.nextFileNum())

	if snapshot {
		if !r.has(recJournalNum) {
			r.setJournalNum(s.stJournalNum)
		}

		if !r.has(recSeqNum) {
			r.setSeqNum(s.stSeqNum)
		}

		for level, ik := range s.stCompPtrs {
			if ik != nil {
				r.addCompPtr(level, ik)
			}
		}

		r.setComparer(s.icmp.uName())
	}
}
+
// Mark if record has been committed, this will update session state;
// need external synchronization.
// (Name kept as "recordCommited" — misspelled but part of the existing
// interface.)
func (s *session) recordCommited(r *sessionRecord) {
	if r.has(recJournalNum) {
		s.stJournalNum = r.journalNum
	}

	if r.has(recPrevJournalNum) {
		s.stPrevJournalNum = r.prevJournalNum
	}

	if r.has(recSeqNum) {
		s.stSeqNum = r.seqNum
	}

	for _, p := range r.compPtrs {
		s.stCompPtrs[p.level] = iKey(p.ikey)
	}
}
+
// Create a new manifest file; need external synchronization.
// Writes a full snapshot record (session state plus version v) to a
// fresh manifest, points CURRENT at it, and swaps it in as the active
// manifest. On failure the partial file is removed and its file number
// reused; on success the previous manifest file is removed.
func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
	num := s.allocFileNum()
	file := s.stor.GetFile(num, storage.TypeManifest)
	writer, err := file.Create()
	if err != nil {
		return
	}
	jw := journal.NewWriter(writer)

	if v == nil {
		v = s.version()
		defer v.release()
	}
	if rec == nil {
		rec = &sessionRecord{}
	}
	s.fillRecord(rec, true)
	v.fillRecord(rec)

	// Commit or roll back depending on the final value of err.
	defer func() {
		if err == nil {
			s.recordCommited(rec)
			if s.manifest != nil {
				s.manifest.Close()
			}
			if s.manifestWriter != nil {
				s.manifestWriter.Close()
			}
			if s.manifestFile != nil {
				s.manifestFile.Remove()
			}
			s.manifestFile = file
			s.manifestWriter = writer
			s.manifest = jw
		} else {
			writer.Close()
			file.Remove()
			s.reuseFileNum(num)
		}
	}()

	w, err := jw.Next()
	if err != nil {
		return
	}
	err = rec.encode(w)
	if err != nil {
		return
	}
	err = jw.Flush()
	if err != nil {
		return
	}
	// Point the CURRENT file at the new manifest.
	err = s.stor.SetManifest(file)
	return
}
+
// Flush record to disk.
// Appends rec to the current manifest journal, optionally syncing,
// then folds the record into session state.
func (s *session) flushManifest(rec *sessionRecord) (err error) {
	s.fillRecord(rec, false)
	w, err := s.manifest.Next()
	if err != nil {
		return
	}
	err = rec.encode(w)
	if err != nil {
		return
	}
	err = s.manifest.Flush()
	if err != nil {
		return
	}
	// Sync unless the user disabled it via options.
	if !s.o.GetNoSync() {
		err = s.manifestWriter.Sync()
		if err != nil {
			return
		}
	}
	s.recordCommited(rec)
	return
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
new file mode 100644
index 0000000..9e7fc51
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
@@ -0,0 +1,565 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reservefs.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
// errFileOpen is returned when an operation is attempted on a file
// handle that is still open.
var errFileOpen = errors.New("leveldb/storage: file still open")

// fileLock is a platform-specific advisory lock on the storage
// directory.
type fileLock interface {
	release() error
}
+
// fileStorageLock is the in-process exclusive lock handed out by
// fileStorage.Lock.
type fileStorageLock struct {
	fs *fileStorage
}
+
+func (lock *fileStorageLock) Release() {
+	fs := lock.fs
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if fs.slock == lock {
+		fs.slock = nil
+	}
+	return
+}
+
+const logSizeThreshold = 1024 * 1024 // 1 MiB
+
// fileStorage is a file-system backed storage.
type fileStorage struct {
	path string

	mu      sync.Mutex
	flock   fileLock         // Directory lock held for the storage lifetime.
	slock   *fileStorageLock // Current Lock() holder, nil when unlocked.
	logw    *os.File         // LOG file writer, recreated on rotation.
	logSize int              // Bytes written to LOG, drives rotation.
	buf     []byte           // Reusable log-line scratch buffer.
	// Opened file counter; if open < 0 means closed.
	open int
	day  int // Last day-of-month a date banner was logged.
}
+
// OpenFile returns a new filesytem-backed storage implementation with the given
// path. This also hold a file lock, so any subsequent attempt to open the same
// path will fail.
//
// The storage must be closed after use, by calling Close method.
func OpenFile(path string) (Storage, error) {
	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, err
	}

	flock, err := newFileLock(filepath.Join(path, "LOCK"))
	if err != nil {
		return nil, err
	}

	// Release the lock on any error path below (err is the named-less
	// outer variable reassigned by the later calls).
	defer func() {
		if err != nil {
			flock.release()
		}
	}()

	// Rotate the previous LOG aside; failure to rename is ignored.
	rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old"))
	logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	logSize, err := logw.Seek(0, os.SEEK_END)
	if err != nil {
		logw.Close()
		return nil, err
	}

	fs := &fileStorage{path: path, flock: flock, logw: logw, logSize: int(logSize)}
	// Safety net: release the directory lock if the caller forgets Close.
	runtime.SetFinalizer(fs, (*fileStorage).Close)
	return fs, nil
}
+
+func (fs *fileStorage) Lock() (util.Releaser, error) {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if fs.open < 0 {
+		return nil, ErrClosed
+	}
+	if fs.slock != nil {
+		return nil, ErrLocked
+	}
+	fs.slock = &fileStorageLock{fs: fs}
+	return fs.slock, nil
+}
+
// itoa appends the decimal representation of i to buf, left-padded
// with zeros to a minimum width of wid, and returns the extended slice.
func itoa(buf []byte, i int, wid int) []byte {
	u := uint(i)
	if u == 0 && wid <= 1 {
		return append(buf, '0')
	}

	// Build the digits back-to-front in a scratch array.
	var tmp [32]byte
	pos := len(tmp)
	for u > 0 || wid > 0 {
		pos--
		wid--
		tmp[pos] = '0' + byte(u%10)
		u /= 10
	}
	return append(buf, tmp[pos:]...)
}
+
+func (fs *fileStorage) printDay(t time.Time) {
+	if fs.day == t.Day() {
+		return
+	}
+	fs.day = t.Day()
+	fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n"))
+}
+
// doLog formats and appends one timestamped line to the LOG file,
// rotating it to LOG.old once it exceeds logSizeThreshold. Caller must
// hold fs.mu. Write errors are silently ignored (best-effort logging).
func (fs *fileStorage) doLog(t time.Time, str string) {
	if fs.logSize > logSizeThreshold {
		// Rotate log file.
		fs.logw.Close()
		fs.logw = nil
		fs.logSize = 0
		rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old"))
	}
	if fs.logw == nil {
		var err error
		fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
		if err != nil {
			return
		}
		// Force printDay on new log file.
		fs.day = 0
	}
	fs.printDay(t)
	hour, min, sec := t.Clock()
	msec := t.Nanosecond() / 1e3
	// time
	fs.buf = itoa(fs.buf[:0], hour, 2)
	fs.buf = append(fs.buf, ':')
	fs.buf = itoa(fs.buf, min, 2)
	fs.buf = append(fs.buf, ':')
	fs.buf = itoa(fs.buf, sec, 2)
	fs.buf = append(fs.buf, '.')
	fs.buf = itoa(fs.buf, msec, 6)
	fs.buf = append(fs.buf, ' ')
	// write
	fs.buf = append(fs.buf, []byte(str)...)
	fs.buf = append(fs.buf, '\n')
	fs.logw.Write(fs.buf)
}
+
+func (fs *fileStorage) Log(str string) {
+	t := time.Now()
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if fs.open < 0 {
+		return
+	}
+	fs.doLog(t, str)
+}
+
// log writes str without checking the closed state; caller must hold
// fs.mu.
func (fs *fileStorage) log(str string) {
	fs.doLog(time.Now(), str)
}
+
// GetFile returns a lazy handle for file number num of type t; no
// filesystem access happens until the handle is opened or created.
func (fs *fileStorage) GetFile(num uint64, t FileType) File {
	return &file{fs: fs, num: num, t: t}
}
+
+func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if fs.open < 0 {
+		return nil, ErrClosed
+	}
+	dir, err := os.Open(fs.path)
+	if err != nil {
+		return
+	}
+	fnn, err := dir.Readdirnames(0)
+	// Close the dir first before checking for Readdirnames error.
+	if err := dir.Close(); err != nil {
+		fs.log(fmt.Sprintf("close dir: %v", err))
+	}
+	if err != nil {
+		return
+	}
+	f := &file{fs: fs}
+	for _, fn := range fnn {
+		if f.parse(fn) && (f.t&t) != 0 {
+			ff = append(ff, f)
+			f = &file{fs: fs}
+		}
+	}
+	return
+}
+
// GetManifest locates the current manifest by scanning every
// CURRENT* file in the storage directory: corrupted or obsolete
// pending entries are skipped (and queued for removal), the winning
// pending CURRENT.N is promoted to CURRENT, and os.ErrNotExist is
// returned when no valid CURRENT file exists at all.
func (fs *fileStorage) GetManifest() (f File, err error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()
	if fs.open < 0 {
		return nil, ErrClosed
	}
	dir, err := os.Open(fs.path)
	if err != nil {
		return
	}
	fnn, err := dir.Readdirnames(0)
	// Close the dir first before checking for Readdirnames error.
	if err := dir.Close(); err != nil {
		fs.log(fmt.Sprintf("close dir: %v", err))
	}
	if err != nil {
		return
	}
	// Find latest CURRENT file.
	var rem []string  // Pending CURRENT.N files to delete afterwards.
	var pend bool     // Whether the winner is a pending CURRENT.N.
	var cerr error    // First corruption error, reported if nothing valid is found.
	for _, fn := range fnn {
		if strings.HasPrefix(fn, "CURRENT") {
			pend1 := len(fn) > 7
			// Make sure it is valid name for a CURRENT file, otherwise skip it.
			if pend1 {
				if fn[7] != '.' || len(fn) < 9 {
					fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
					continue
				}
				if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
					fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
					continue
				}
			}
			path := filepath.Join(fs.path, fn)
			r, e1 := os.OpenFile(path, os.O_RDONLY, 0)
			if e1 != nil {
				return nil, e1
			}
			// A CURRENT file holds a single newline-terminated manifest
			// file name.
			b, e1 := ioutil.ReadAll(r)
			if e1 != nil {
				r.Close()
				return nil, e1
			}
			f1 := &file{fs: fs}
			if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) {
				fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn))
				if pend1 {
					rem = append(rem, fn)
				}
				if !pend1 || cerr == nil {
					cerr = &ErrCorrupted{
						File: fsParseName(filepath.Base(fn)),
						Err:  errors.New("leveldb/storage: corrupted or incomplete manifest file"),
					}
				}
			} else if f != nil && f1.Num() < f.Num() {
				fs.log(fmt.Sprintf("skipping %s: obsolete", fn))
				if pend1 {
					rem = append(rem, fn)
				}
			} else {
				f = f1
				pend = pend1
			}
			if err := r.Close(); err != nil {
				fs.log(fmt.Sprintf("close %s: %v", fn, err))
			}
		}
	}
	// Don't remove any files if there is no valid CURRENT file.
	if f == nil {
		if cerr != nil {
			err = cerr
		} else {
			err = os.ErrNotExist
		}
		return
	}
	// Rename pending CURRENT file to an effective CURRENT.
	if pend {
		path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num())
		if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
			fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err))
		}
	}
	// Remove obsolete or incomplete pending CURRENT files.
	for _, fn := range rem {
		path := filepath.Join(fs.path, fn)
		if err := os.Remove(path); err != nil {
			fs.log(fmt.Sprintf("remove %s: %v", fn, err))
		}
	}
	return
}
+
// SetManifest atomically points CURRENT at manifest file f by writing
// its name to a pending CURRENT.N file and renaming it into place.
func (fs *fileStorage) SetManifest(f File) (err error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()
	if fs.open < 0 {
		return ErrClosed
	}
	f2, ok := f.(*file)
	if !ok || f2.t != TypeManifest {
		return ErrInvalidFile
	}
	defer func() {
		if err != nil {
			fs.log(fmt.Sprintf("CURRENT: %v", err))
		}
	}()
	path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num())
	w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	_, err = fmt.Fprintln(w, f2.name())
	// Close the file first. (The inner err deliberately shadows the
	// outer one: a close failure is only logged, the write error wins.)
	if err := w.Close(); err != nil {
		fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err))
	}
	if err != nil {
		return err
	}
	return rename(path, filepath.Join(fs.path, "CURRENT"))
}
+
+func (fs *fileStorage) Close() error {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if fs.open < 0 {
+		return ErrClosed
+	}
+	// Clear the finalizer.
+	runtime.SetFinalizer(fs, nil)
+
+	if fs.open > 0 {
+		fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open))
+	}
+	fs.open = -1
+	if fs.logw != nil {
+		fs.logw.Close()
+	}
+	return fs.flock.release()
+}
+
+type fileWrap struct {
+	*os.File
+	f *file
+}
+
+func (fw fileWrap) Sync() error {
+	if err := fw.File.Sync(); err != nil {
+		return err
+	}
+	if fw.f.Type() == TypeManifest {
+		// Also sync parent directory if file type is manifest.
+		// See: https://code.google.com/p/leveldb/issues/detail?id=190.
+		if err := syncDir(fw.f.fs.path); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (fw fileWrap) Close() error {
+	f := fw.f
+	f.fs.mu.Lock()
+	defer f.fs.mu.Unlock()
+	if !f.open {
+		return ErrClosed
+	}
+	f.open = false
+	f.fs.open--
+	err := fw.File.Close()
+	if err != nil {
+		f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err))
+	}
+	return err
+}
+
+type file struct {
+	fs   *fileStorage
+	num  uint64
+	t    FileType
+	open bool
+}
+
+func (f *file) Open() (Reader, error) {
+	f.fs.mu.Lock()
+	defer f.fs.mu.Unlock()
+	if f.fs.open < 0 {
+		return nil, ErrClosed
+	}
+	if f.open {
+		return nil, errFileOpen
+	}
+	of, err := os.OpenFile(f.path(), os.O_RDONLY, 0)
+	if err != nil {
+		if f.hasOldName() && os.IsNotExist(err) {
+			of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0)
+			if err == nil {
+				goto ok
+			}
+		}
+		return nil, err
+	}
+ok:
+	f.open = true
+	f.fs.open++
+	return fileWrap{of, f}, nil
+}
+
+func (f *file) Create() (Writer, error) {
+	f.fs.mu.Lock()
+	defer f.fs.mu.Unlock()
+	if f.fs.open < 0 {
+		return nil, ErrClosed
+	}
+	if f.open {
+		return nil, errFileOpen
+	}
+	of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+	if err != nil {
+		return nil, err
+	}
+	f.open = true
+	f.fs.open++
+	return fileWrap{of, f}, nil
+}
+
+func (f *file) Replace(newfile File) error {
+	f.fs.mu.Lock()
+	defer f.fs.mu.Unlock()
+	if f.fs.open < 0 {
+		return ErrClosed
+	}
+	newfile2, ok := newfile.(*file)
+	if !ok {
+		return ErrInvalidFile
+	}
+	if f.open || newfile2.open {
+		return errFileOpen
+	}
+	return rename(newfile2.path(), f.path())
+}
+
+func (f *file) Type() FileType {
+	return f.t
+}
+
+func (f *file) Num() uint64 {
+	return f.num
+}
+
+func (f *file) Remove() error {
+	f.fs.mu.Lock()
+	defer f.fs.mu.Unlock()
+	if f.fs.open < 0 {
+		return ErrClosed
+	}
+	if f.open {
+		return errFileOpen
+	}
+	err := os.Remove(f.path())
+	if err != nil {
+		f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err))
+	}
+	// Also try remove file with old name, just in case.
+	if f.hasOldName() {
+		if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) {
+			// Log e1 (the old-name removal error), not the stale err
+			// from the primary path, which may even be nil here.
+			f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), e1))
+			err = e1
+		}
+	}
+	return err
+}
+
+func (f *file) hasOldName() bool {
+	return f.t == TypeTable
+}
+
+func (f *file) oldName() string {
+	switch f.t {
+	case TypeTable:
+		return fmt.Sprintf("%06d.sst", f.num)
+	}
+	return f.name()
+}
+
+func (f *file) oldPath() string {
+	return filepath.Join(f.fs.path, f.oldName())
+}
+
+func (f *file) name() string {
+	switch f.t {
+	case TypeManifest:
+		return fmt.Sprintf("MANIFEST-%06d", f.num)
+	case TypeJournal:
+		return fmt.Sprintf("%06d.log", f.num)
+	case TypeTable:
+		return fmt.Sprintf("%06d.ldb", f.num)
+	case TypeTemp:
+		return fmt.Sprintf("%06d.tmp", f.num)
+	default:
+		panic("invalid file type")
+	}
+}
+
+func (f *file) path() string {
+	return filepath.Join(f.fs.path, f.name())
+}
+
+func fsParseName(name string) *FileInfo {
+	fi := &FileInfo{}
+	var tail string
+	_, err := fmt.Sscanf(name, "%d.%s", &fi.Num, &tail)
+	if err == nil {
+		switch tail {
+		case "log":
+			fi.Type = TypeJournal
+		case "ldb", "sst":
+			fi.Type = TypeTable
+		case "tmp":
+			fi.Type = TypeTemp
+		default:
+			return nil
+		}
+		return fi
+	}
+	n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fi.Num, &tail)
+	if n == 1 {
+		fi.Type = TypeManifest
+		return fi
+	}
+	return nil
+}
+
+func (f *file) parse(name string) bool {
+	fi := fsParseName(name)
+	if fi == nil {
+		return false
+	}
+	f.t = fi.Type
+	f.num = fi.Num
+	return true
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
new file mode 100644
index 0000000..42940d7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+	"os"
+	"path/filepath"
+)
+
+type plan9FileLock struct {
+	f *os.File
+}
+
+func (fl *plan9FileLock) release() error {
+	return fl.f.Close()
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
+	if err != nil {
+		return
+	}
+	fl = &plan9FileLock{f: f}
+	return
+}
+
+func rename(oldpath, newpath string) error {
+	if _, err := os.Stat(newpath); err == nil {
+		if err := os.Remove(newpath); err != nil {
+			return err
+		}
+	}
+
+	_, fname := filepath.Split(newpath)
+	return os.Rename(oldpath, fname)
+}
+
+func syncDir(name string) error {
+	f, err := os.Open(name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := f.Sync(); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
new file mode 100644
index 0000000..102031b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package storage
+
+import (
+	"os"
+	"syscall"
+)
+
+type unixFileLock struct {
+	f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+	if err := setFileLock(fl.f, false); err != nil {
+		return err
+	}
+	return fl.f.Close()
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
+	if err != nil {
+		return
+	}
+	err = setFileLock(f, true)
+	if err != nil {
+		f.Close()
+		return
+	}
+	fl = &unixFileLock{f: f}
+	return
+}
+
+func setFileLock(f *os.File, lock bool) error {
+	flock := syscall.Flock_t{
+		Type:   syscall.F_UNLCK,
+		Start:  0,
+		Len:    0,
+		Whence: 1,
+	}
+	if lock {
+		flock.Type = syscall.F_WRLCK
+	}
+	return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock)
+}
+
+func rename(oldpath, newpath string) error {
+	return os.Rename(oldpath, newpath)
+}
+
+func syncDir(name string) error {
+	f, err := os.Open(name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := f.Sync(); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go
new file mode 100644
index 0000000..92abcbb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+var cases = []struct {
+	oldName []string
+	name    string
+	ftype   FileType
+	num     uint64
+}{
+	{nil, "000100.log", TypeJournal, 100},
+	{nil, "000000.log", TypeJournal, 0},
+	{[]string{"000000.sst"}, "000000.ldb", TypeTable, 0},
+	{nil, "MANIFEST-000002", TypeManifest, 2},
+	{nil, "MANIFEST-000007", TypeManifest, 7},
+	{nil, "18446744073709551615.log", TypeJournal, 18446744073709551615},
+	{nil, "000100.tmp", TypeTemp, 100},
+}
+
+var invalidCases = []string{
+	"",
+	"foo",
+	"foo-dx-100.log",
+	".log",
+	"",
+	"manifest",
+	"CURREN",
+	"CURRENTX",
+	"MANIFES",
+	"MANIFEST",
+	"MANIFEST-",
+	"XMANIFEST-3",
+	"MANIFEST-3x",
+	"LOC",
+	"LOCKx",
+	"LO",
+	"LOGx",
+	"18446744073709551616.log",
+	"184467440737095516150.log",
+	"100",
+	"100.",
+	"100.lop",
+}
+
+func TestFileStorage_CreateFileName(t *testing.T) {
+	for _, c := range cases {
+		f := &file{num: c.num, t: c.ftype}
+		if f.name() != c.name {
+			t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name)
+		}
+	}
+}
+
+func TestFileStorage_ParseFileName(t *testing.T) {
+	for _, c := range cases {
+		for _, name := range append([]string{c.name}, c.oldName...) {
+			f := new(file)
+			if !f.parse(name) {
+				t.Errorf("cannot parse filename '%s'", name)
+				continue
+			}
+			if f.Type() != c.ftype {
+				t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype)
+			}
+			if f.Num() != c.num {
+				t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num)
+			}
+		}
+	}
+}
+
+func TestFileStorage_InvalidFileName(t *testing.T) {
+	for _, name := range invalidCases {
+		f := new(file)
+		if f.parse(name) {
+			t.Errorf("filename '%s' should be invalid", name)
+		}
+	}
+}
+
+func TestFileStorage_Locking(t *testing.T) {
+	path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid()))
+
+	_, err := os.Stat(path)
+	if err == nil {
+		err = os.RemoveAll(path)
+		if err != nil {
+			t.Fatal("RemoveAll: got error: ", err)
+		}
+	}
+
+	p1, err := OpenFile(path)
+	if err != nil {
+		t.Fatal("OpenFile(1): got error: ", err)
+	}
+
+	defer os.RemoveAll(path)
+
+	p2, err := OpenFile(path)
+	if err != nil {
+		t.Logf("OpenFile(2): got error: %s (expected)", err)
+	} else {
+		p2.Close()
+		p1.Close()
+		t.Fatal("OpenFile(2): expect error")
+	}
+
+	p1.Close()
+
+	p3, err := OpenFile(path)
+	if err != nil {
+		t.Fatal("OpenFile(3): got error: ", err)
+	}
+	defer p3.Close()
+
+	l, err := p3.Lock()
+	if err != nil {
+		t.Fatal("storage lock failed(1): ", err)
+	}
+	_, err = p3.Lock()
+	if err == nil {
+		t.Fatal("expect error for second storage lock attempt")
+	} else {
+		t.Logf("storage lock got error: %s (expected)", err)
+	}
+	l.Release()
+	_, err = p3.Lock()
+	if err != nil {
+		t.Fatal("storage lock failed(2): ", err)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
new file mode 100644
index 0000000..6eb3274
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package storage
+
+import (
+	"os"
+	"syscall"
+)
+
+type unixFileLock struct {
+	f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+	if err := setFileLock(fl.f, false); err != nil {
+		return err
+	}
+	return fl.f.Close()
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
+	if err != nil {
+		return
+	}
+	err = setFileLock(f, true)
+	if err != nil {
+		f.Close()
+		return
+	}
+	fl = &unixFileLock{f: f}
+	return
+}
+
+func setFileLock(f *os.File, lock bool) error {
+	how := syscall.LOCK_UN
+	if lock {
+		how = syscall.LOCK_EX
+	}
+	return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB)
+}
+
+func rename(oldpath, newpath string) error {
+	return os.Rename(oldpath, newpath)
+}
+
+func isErrInvalid(err error) bool {
+	if err == os.ErrInvalid {
+		return true
+	}
+	if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
+		return true
+	}
+	return false
+}
+
+func syncDir(name string) error {
+	f, err := os.Open(name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := f.Sync(); err != nil && !isErrInvalid(err) {
+		return err
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
new file mode 100644
index 0000000..50c3c45
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
@@ -0,0 +1,69 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var (
+	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+const (
+	_MOVEFILE_REPLACE_EXISTING = 1
+)
+
+type windowsFileLock struct {
+	fd syscall.Handle
+}
+
+func (fl *windowsFileLock) release() error {
+	return syscall.Close(fl.fd)
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+	pathp, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return
+	}
+	fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+	if err != nil {
+		return
+	}
+	fl = &windowsFileLock{fd: fd}
+	return
+}
+
+func moveFileEx(from *uint16, to *uint16, flags uint32) error {
+	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+	if r1 == 0 {
+		if e1 != 0 {
+			return error(e1)
+		} else {
+			return syscall.EINVAL
+		}
+	}
+	return nil
+}
+
+func rename(oldpath, newpath string) error {
+	from, err := syscall.UTF16PtrFromString(oldpath)
+	if err != nil {
+		return err
+	}
+	to, err := syscall.UTF16PtrFromString(newpath)
+	if err != nil {
+		return err
+	}
+	return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING)
+}
+
+func syncDir(name string) error { return nil }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
new file mode 100644
index 0000000..855a39b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
@@ -0,0 +1,203 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+	"bytes"
+	"os"
+	"sync"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+const typeShift = 3
+
+type memStorageLock struct {
+	ms *memStorage
+}
+
+func (lock *memStorageLock) Release() {
+	ms := lock.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.slock == lock {
+		ms.slock = nil
+	}
+	return
+}
+
+// memStorage is a memory-backed storage.
+type memStorage struct {
+	mu       sync.Mutex
+	slock    *memStorageLock
+	files    map[uint64]*memFile
+	manifest *memFilePtr
+}
+
+// NewMemStorage returns a new memory-backed storage implementation.
+func NewMemStorage() Storage {
+	return &memStorage{
+		files: make(map[uint64]*memFile),
+	}
+}
+
+func (ms *memStorage) Lock() (util.Releaser, error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.slock != nil {
+		return nil, ErrLocked
+	}
+	ms.slock = &memStorageLock{ms: ms}
+	return ms.slock, nil
+}
+
+func (*memStorage) Log(str string) {}
+
+func (ms *memStorage) GetFile(num uint64, t FileType) File {
+	return &memFilePtr{ms: ms, num: num, t: t}
+}
+
+func (ms *memStorage) GetFiles(t FileType) ([]File, error) {
+	ms.mu.Lock()
+	var ff []File
+	// Keys encode both number and type; filter by the requested type mask.
+	for x := range ms.files {
+		num, mt := x>>typeShift, FileType(x)&TypeAll
+		if mt&t == 0 {
+			continue
+		}
+		ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt})
+	}
+	ms.mu.Unlock()
+	return ff, nil
+}
+
+func (ms *memStorage) GetManifest() (File, error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.manifest == nil {
+		return nil, os.ErrNotExist
+	}
+	return ms.manifest, nil
+}
+
+func (ms *memStorage) SetManifest(f File) error {
+	fm, ok := f.(*memFilePtr)
+	if !ok || fm.t != TypeManifest {
+		return ErrInvalidFile
+	}
+	ms.mu.Lock()
+	ms.manifest = fm
+	ms.mu.Unlock()
+	return nil
+}
+
+func (*memStorage) Close() error { return nil }
+
+type memReader struct {
+	*bytes.Reader
+	m *memFile
+}
+
+func (mr *memReader) Close() error {
+	return mr.m.Close()
+}
+
+type memFile struct {
+	bytes.Buffer
+	ms   *memStorage
+	open bool
+}
+
+func (*memFile) Sync() error { return nil }
+func (m *memFile) Close() error {
+	m.ms.mu.Lock()
+	m.open = false
+	m.ms.mu.Unlock()
+	return nil
+}
+
+type memFilePtr struct {
+	ms  *memStorage
+	num uint64
+	t   FileType
+}
+
+func (p *memFilePtr) x() uint64 {
+	return p.Num()<<typeShift | uint64(p.Type())
+}
+
+func (p *memFilePtr) Open() (Reader, error) {
+	ms := p.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if m, exist := ms.files[p.x()]; exist {
+		if m.open {
+			return nil, errFileOpen
+		}
+		m.open = true
+		return &memReader{Reader: bytes.NewReader(m.Bytes()), m: m}, nil
+	}
+	return nil, os.ErrNotExist
+}
+
+func (p *memFilePtr) Create() (Writer, error) {
+	ms := p.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	m, exist := ms.files[p.x()]
+	if exist {
+		if m.open {
+			return nil, errFileOpen
+		}
+		m.Reset()
+	} else {
+		m = &memFile{ms: ms}
+		ms.files[p.x()] = m
+	}
+	m.open = true
+	return m, nil
+}
+
+func (p *memFilePtr) Replace(newfile File) error {
+	p1, ok := newfile.(*memFilePtr)
+	if !ok {
+		return ErrInvalidFile
+	}
+	ms := p.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	m1, exist := ms.files[p1.x()]
+	if !exist {
+		return os.ErrNotExist
+	}
+	m0, exist := ms.files[p.x()]
+	if (exist && m0.open) || m1.open {
+		return errFileOpen
+	}
+	delete(ms.files, p1.x())
+	ms.files[p.x()] = m1
+	return nil
+}
+
+func (p *memFilePtr) Type() FileType {
+	return p.t
+}
+
+func (p *memFilePtr) Num() uint64 {
+	return p.num
+}
+
+func (p *memFilePtr) Remove() error {
+	ms := p.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if _, exist := ms.files[p.x()]; exist {
+		delete(ms.files, p.x())
+		return nil
+	}
+	return os.ErrNotExist
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
new file mode 100644
index 0000000..23bb074
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
@@ -0,0 +1,66 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+	"bytes"
+	"testing"
+)
+
+func TestMemStorage(t *testing.T) {
+	m := NewMemStorage()
+
+	l, err := m.Lock()
+	if err != nil {
+		t.Fatal("storage lock failed(1): ", err)
+	}
+	_, err = m.Lock()
+	if err == nil {
+		t.Fatal("expect error for second storage lock attempt")
+	} else {
+		t.Logf("storage lock got error: %s (expected)", err)
+	}
+	l.Release()
+	_, err = m.Lock()
+	if err != nil {
+		t.Fatal("storage lock failed(2): ", err)
+	}
+
+	f := m.GetFile(1, TypeTable)
+	// Fail if EITHER the number or the type is wrong (was &&, which
+	// only triggered when both were wrong and so could never catch a
+	// single mismatch).
+	if f.Num() != 1 || f.Type() != TypeTable {
+		t.Fatal("invalid file number and type")
+	}
+	w, _ := f.Create()
+	w.Write([]byte("abc"))
+	w.Close()
+	if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 {
+		t.Fatal("invalid GetFiles len")
+	}
+	buf := new(bytes.Buffer)
+	r, err := f.Open()
+	if err != nil {
+		t.Fatal("Open: got error: ", err)
+	}
+	buf.ReadFrom(r)
+	r.Close()
+	if got := buf.String(); got != "abc" {
+		t.Fatalf("Read: invalid value, want=abc got=%s", got)
+	}
+	if _, err := f.Open(); err != nil {
+		t.Fatal("Open: got error: ", err)
+	}
+	if _, err := m.GetFile(1, TypeTable).Open(); err == nil {
+		t.Fatal("expecting error")
+	}
+	f.Remove()
+	if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 {
+		t.Fatal("invalid GetFiles len", len(ff))
+	}
+	if _, err := f.Open(); err == nil {
+		t.Fatal("expecting error")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
new file mode 100644
index 0000000..89d5dc5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package storage provides storage abstraction for LevelDB.
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type FileType uint32
+
+const (
+	TypeManifest FileType = 1 << iota
+	TypeJournal
+	TypeTable
+	TypeTemp
+
+	TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
+)
+
+func (t FileType) String() string {
+	switch t {
+	case TypeManifest:
+		return "manifest"
+	case TypeJournal:
+		return "journal"
+	case TypeTable:
+		return "table"
+	case TypeTemp:
+		return "temp"
+	}
+	return fmt.Sprintf("<unknown:%d>", t)
+}
+
+var (
+	ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
+	ErrLocked      = errors.New("leveldb/storage: already locked")
+	ErrClosed      = errors.New("leveldb/storage: closed")
+)
+
+// ErrCorrupted is the type that wraps errors that indicate corruption of
+// a file. Package storage has its own type instead of using
+// errors.ErrCorrupted to prevent circular import.
+type ErrCorrupted struct {
+	File *FileInfo
+	Err  error
+}
+
+func (e *ErrCorrupted) Error() string {
+	if e.File != nil {
+		return fmt.Sprintf("%v [file=%v]", e.Err, e.File)
+	} else {
+		return e.Err.Error()
+	}
+}
+
+// Syncer is the interface that wraps basic Sync method.
+type Syncer interface {
+	// Sync commits the current contents of the file to stable storage.
+	Sync() error
+}
+
+// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
+// methods.
+type Reader interface {
+	io.ReadSeeker
+	io.ReaderAt
+	io.Closer
+}
+
+// Writer is the interface that groups the basic Write, Sync and Close
+// methods.
+type Writer interface {
+	io.WriteCloser
+	Syncer
+}
+
+// File is the file. A file instance must be goroutine-safe.
+type File interface {
+	// Open opens the file for read. Returns os.ErrNotExist error
+	// if the file does not exist.
+	// Returns ErrClosed if the underlying storage is closed.
+	Open() (r Reader, err error)
+
+	// Create creates the file for writing. Truncates the file if it
+	// already exists.
+	// Returns ErrClosed if the underlying storage is closed.
+	Create() (w Writer, err error)
+
+	// Replace replaces file with newfile.
+	// Returns ErrClosed if the underlying storage is closed.
+	Replace(newfile File) error
+
+	// Type returns the file type
+	Type() FileType
+
+	// Num returns the file number.
+	Num() uint64
+
+	// Remove removes the file.
+	// Returns ErrClosed if the underlying storage is closed.
+	Remove() error
+}
+
+// Storage is the storage. A storage instance must be goroutine-safe.
+type Storage interface {
+	// Lock locks the storage. Any subsequent attempt to call Lock will fail
+	// until the last lock released.
+	// After use the caller should call the Release method.
+	Lock() (l util.Releaser, err error)
+
+	// Log logs a string. This is used for logging. An implementation
+	// may write to a file, stdout or simply do nothing.
+	Log(str string)
+
+	// GetFile returns a file for the given number and type. GetFile will never
+	// return nil, even if the underlying storage is closed.
+	GetFile(num uint64, t FileType) File
+
+	// GetFiles returns a slice of files that match the given file types.
+	// The file types may be OR'ed together.
+	GetFiles(t FileType) ([]File, error)
+
+	// GetManifest returns a manifest file. Returns os.ErrNotExist if manifest
+	// file does not exist.
+	GetManifest() (File, error)
+
+	// SetManifest sets the given file as manifest file. The given file should
+	// be a manifest file type or error will be returned.
+	SetManifest(f File) error
+
+	// Close closes the storage. It is valid to call Close multiple times.
+	// Other methods should not be called after the storage has been closed.
+	Close() error
+}
+
+// FileInfo wraps basic file info.
+type FileInfo struct {
+	Type FileType
+	Num  uint64
+}
+
+func (fi FileInfo) String() string {
+	switch fi.Type {
+	case TypeManifest:
+		return fmt.Sprintf("MANIFEST-%06d", fi.Num)
+	case TypeJournal:
+		return fmt.Sprintf("%06d.log", fi.Num)
+	case TypeTable:
+		return fmt.Sprintf("%06d.ldb", fi.Num)
+	case TypeTemp:
+		return fmt.Sprintf("%06d.tmp", fi.Num)
+	default:
+		return fmt.Sprintf("%#x-%d", fi.Type, fi.Num)
+	}
+}
+
+// NewFileInfo creates new FileInfo from the given File. It returns nil
+// if File is nil.
+func NewFileInfo(f File) *FileInfo {
+	if f == nil {
+		return nil
+	}
+	return &FileInfo{f.Type(), f.Num()}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
new file mode 100644
index 0000000..18b6ff0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
@@ -0,0 +1,549 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"sync"
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+const typeShift = 4
+
+var (
+	tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument")
+	tsErrFileOpen    = errors.New("leveldb.testStorage: file still open")
+)
+
+var (
+	tsFSEnv   = os.Getenv("GOLEVELDB_USEFS")
+	tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR")
+	tsKeepFS  = tsFSEnv == "2"
+	tsFS      = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
+	tsMU      = &sync.Mutex{}
+	tsNum     = 0
+)
+
+type tsOp uint
+
+const (
+	tsOpOpen tsOp = iota
+	tsOpCreate
+	tsOpReplace
+	tsOpRemove
+	tsOpRead
+	tsOpReadAt
+	tsOpWrite
+	tsOpSync
+
+	tsOpNum
+)
+
+type tsLock struct {
+	ts *testStorage
+	r  util.Releaser
+}
+
+func (l tsLock) Release() {
+	l.r.Release()
+	l.ts.t.Log("I: storage lock released")
+}
+
+type tsReader struct {
+	tf tsFile
+	storage.Reader
+}
+
+func (tr tsReader) Read(b []byte) (n int, err error) {
+	ts := tr.tf.ts
+	ts.countRead(tr.tf.Type())
+	if tr.tf.shouldErrLocked(tsOpRead) {
+		return 0, errors.New("leveldb.testStorage: emulated read error")
+	}
+	n, err = tr.Reader.Read(b)
+	if err != nil && err != io.EOF {
+		ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err)
+	}
+	return
+}
+
+func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) {
+	ts := tr.tf.ts
+	ts.countRead(tr.tf.Type())
+	if tr.tf.shouldErrLocked(tsOpReadAt) {
+		return 0, errors.New("leveldb.testStorage: emulated readAt error")
+	}
+	n, err = tr.Reader.ReadAt(b, off)
+	if err != nil && err != io.EOF {
+		ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err)
+	}
+	return
+}
+
+func (tr tsReader) Close() (err error) {
+	err = tr.Reader.Close()
+	tr.tf.close("reader", err)
+	return
+}
+
+type tsWriter struct {
+	tf tsFile
+	storage.Writer
+}
+
+func (tw tsWriter) Write(b []byte) (n int, err error) {
+	if tw.tf.shouldErrLocked(tsOpWrite) {
+		return 0, errors.New("leveldb.testStorage: emulated write error")
+	}
+	n, err = tw.Writer.Write(b)
+	if err != nil {
+		tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
+	}
+	return
+}
+
+func (tw tsWriter) Sync() (err error) {
+	ts := tw.tf.ts
+	ts.mu.Lock()
+	for ts.emuDelaySync&tw.tf.Type() != 0 {
+		ts.cond.Wait()
+	}
+	ts.mu.Unlock()
+	if tw.tf.shouldErrLocked(tsOpSync) {
+		return errors.New("leveldb.testStorage: emulated sync error")
+	}
+	err = tw.Writer.Sync()
+	if err != nil {
+		tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
+	}
+	return
+}
+
+func (tw tsWriter) Close() (err error) {
+	err = tw.Writer.Close()
+	tw.tf.close("writer", err)
+	return
+}
+
+type tsFile struct {
+	ts *testStorage
+	storage.File
+}
+
+func (tf tsFile) x() uint64 {
+	return tf.Num()<<typeShift | uint64(tf.Type())
+}
+
+func (tf tsFile) shouldErr(op tsOp) bool {
+	return tf.ts.shouldErr(tf, op)
+}
+
+func (tf tsFile) shouldErrLocked(op tsOp) bool {
+	tf.ts.mu.Lock()
+	defer tf.ts.mu.Unlock()
+	return tf.shouldErr(op)
+}
+
+func (tf tsFile) checkOpen(m string) error {
+	ts := tf.ts
+	if writer, ok := ts.opens[tf.x()]; ok {
+		if writer {
+			ts.t.Errorf("E: cannot %s file, num=%d type=%v: a writer still open", m, tf.Num(), tf.Type())
+		} else {
+			ts.t.Errorf("E: cannot %s file, num=%d type=%v: a reader still open", m, tf.Num(), tf.Type())
+		}
+		return tsErrFileOpen
+	}
+	return nil
+}
+
+func (tf tsFile) close(m string, err error) {
+	ts := tf.ts
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	if _, ok := ts.opens[tf.x()]; !ok {
+		// Fix typo in the emitted message: "redudant" -> "redundant".
+		ts.t.Errorf("E: %s: redundant file closing, num=%d type=%v", m, tf.Num(), tf.Type())
+	} else if err == nil {
+		ts.t.Logf("I: %s: file closed, num=%d type=%v", m, tf.Num(), tf.Type())
+	}
+	delete(ts.opens, tf.x())
+	if err != nil {
+		ts.t.Errorf("E: %s: cannot close file, num=%d type=%v: %v", m, tf.Num(), tf.Type(), err)
+	}
+}
+
+func (tf tsFile) Open() (r storage.Reader, err error) {
+	ts := tf.ts
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	err = tf.checkOpen("open")
+	if err != nil {
+		return
+	}
+	if tf.shouldErr(tsOpOpen) {
+		err = errors.New("leveldb.testStorage: emulated open error")
+		return
+	}
+	r, err = tf.File.Open()
+	if err != nil {
+		if ts.ignoreOpenErr&tf.Type() != 0 {
+			ts.t.Logf("I: cannot open file, num=%d type=%v: %v (ignored)", tf.Num(), tf.Type(), err)
+		} else {
+			ts.t.Errorf("E: cannot open file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
+		}
+	} else {
+		ts.t.Logf("I: file opened, num=%d type=%v", tf.Num(), tf.Type())
+		ts.opens[tf.x()] = false
+		r = tsReader{tf, r}
+	}
+	return
+}
+
+func (tf tsFile) Create() (w storage.Writer, err error) {
+	ts := tf.ts
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	err = tf.checkOpen("create")
+	if err != nil {
+		return
+	}
+	if tf.shouldErr(tsOpCreate) {
+		err = errors.New("leveldb.testStorage: emulated create error")
+		return
+	}
+	w, err = tf.File.Create()
+	if err != nil {
+		ts.t.Errorf("E: cannot create file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
+	} else {
+		ts.t.Logf("I: file created, num=%d type=%v", tf.Num(), tf.Type())
+		ts.opens[tf.x()] = true
+		w = tsWriter{tf, w}
+	}
+	return
+}
+
+func (tf tsFile) Replace(newfile storage.File) (err error) {
+	ts := tf.ts
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	err = tf.checkOpen("replace")
+	if err != nil {
+		return
+	}
+	if tf.shouldErr(tsOpReplace) {
+		err = errors.New("leveldb.testStorage: emulated create error")
+		return
+	}
+	err = tf.File.Replace(newfile.(tsFile).File)
+	if err != nil {
+		ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
+	} else {
+		ts.t.Logf("I: file replace, num=%d type=%v", tf.Num(), tf.Type())
+	}
+	return
+}
+
+// Remove deletes the underlying file, logging the outcome; it may
+// inject an emulated error when emulation is enabled for tsOpRemove.
+func (tf tsFile) Remove() (err error) {
+	ts := tf.ts
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	err = tf.checkOpen("remove")
+	if err != nil {
+		return
+	}
+	if tf.shouldErr(tsOpRemove) {
+		// Fixed copy-pasted message ("emulated create error"): label the
+		// emulated failure with the operation that produced it.
+		err = errors.New("leveldb.testStorage: emulated remove error")
+		return
+	}
+	err = tf.File.Remove()
+	if err != nil {
+		ts.t.Errorf("E: cannot remove file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
+	} else {
+		ts.t.Logf("I: file removed, num=%d type=%v", tf.Num(), tf.Type())
+	}
+	return
+}
+
+// testStorage wraps a storage.Storage for tests: it logs operations,
+// tracks open files so leaks can be reported, and can emulate errors
+// and delayed syncs on selected file types and operations.
+type testStorage struct {
+	t *testing.T
+	storage.Storage
+	// closeFn, when non-nil, runs after the wrapped storage is closed.
+	closeFn func() error
+
+	mu   sync.Mutex
+	cond sync.Cond
+	// Open files, true=writer, false=reader
+	opens map[uint64]bool
+	// File types whose Sync should block until ReleaseSync.
+	emuDelaySync storage.FileType
+	// File types whose Open errors are logged instead of failed.
+	ignoreOpenErr storage.FileType
+	// readCnt counts reads on file types enabled in readCntEn.
+	readCnt   uint64
+	readCntEn storage.FileType
+
+	// Per-operation file-type masks for error emulation: emuErr always
+	// fires, emuErrOnce fires once per (file, op) pair, emuRandErr
+	// fires with probability 1/emuRandErrProb. emuErrOnceMap records,
+	// per file, a bitmask of ops that have already fired once.
+	emuErr         [tsOpNum]storage.FileType
+	emuErrOnce     [tsOpNum]storage.FileType
+	emuRandErr     [tsOpNum]storage.FileType
+	emuRandErrProb int
+	emuErrOnceMap  map[uint64]uint
+	emuRandRand    *rand.Rand
+}
+
+// shouldErr reports whether an emulated error should be injected for
+// operation op on file tf. emuErr triggers unconditionally; emuErrOnce
+// and emuRandErr trigger at most once per (file, op) pair, recorded in
+// emuErrOnceMap, with emuRandErr additionally gated by a 1-in-
+// emuRandErrProb random draw. Callers in this file invoke it with
+// ts.mu held.
+func (ts *testStorage) shouldErr(tf tsFile, op tsOp) bool {
+	if ts.emuErr[op]&tf.Type() != 0 {
+		return true
+	} else if ts.emuRandErr[op]&tf.Type() != 0 || ts.emuErrOnce[op]&tf.Type() != 0 {
+		sop := uint(1) << op // bit for this op in the once-map entry
+		eop := ts.emuErrOnceMap[tf.x()]
+		if eop&sop == 0 && (ts.emuRandRand.Int()%ts.emuRandErrProb == 0 || ts.emuErrOnce[op]&tf.Type() != 0) {
+			ts.emuErrOnceMap[tf.x()] = eop | sop
+			ts.t.Logf("I: emulated error: file=%d type=%v op=%v", tf.Num(), tf.Type(), op)
+			return true
+		}
+	}
+	return false
+}
+
+// SetEmuErr enables unconditional emulated errors for the given ops on
+// files whose type matches t.
+func (ts *testStorage) SetEmuErr(t storage.FileType, ops ...tsOp) {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	for _, op := range ops {
+		ts.emuErr[op] = t
+	}
+}
+
+// SetEmuErrOnce enables one-shot emulated errors for the given ops on
+// files whose type matches t.
+func (ts *testStorage) SetEmuErrOnce(t storage.FileType, ops ...tsOp) {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	for _, op := range ops {
+		ts.emuErrOnce[op] = t
+	}
+}
+
+// SetEmuRandErr enables randomized emulated errors for the given ops
+// on files whose type matches t.
+func (ts *testStorage) SetEmuRandErr(t storage.FileType, ops ...tsOp) {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	for _, op := range ops {
+		ts.emuRandErr[op] = t
+	}
+}
+
+// SetEmuRandErrProb sets the denominator of the random-error
+// probability (an error fires roughly once per prob draws).
+func (ts *testStorage) SetEmuRandErrProb(prob int) {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	ts.emuRandErrProb = prob
+}
+
+// DelaySync makes Sync on files of type t block until ReleaseSync is
+// called for that type.
+func (ts *testStorage) DelaySync(t storage.FileType) {
+	ts.mu.Lock()
+	ts.emuDelaySync |= t
+	ts.cond.Broadcast()
+	ts.mu.Unlock()
+}
+
+// ReleaseSync unblocks Syncs previously delayed for file type t.
+func (ts *testStorage) ReleaseSync(t storage.FileType) {
+	ts.mu.Lock()
+	ts.emuDelaySync &= ^t
+	ts.cond.Broadcast()
+	ts.mu.Unlock()
+}
+
+// ReadCounter returns the number of counted reads so far.
+func (ts *testStorage) ReadCounter() uint64 {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	return ts.readCnt
+}
+
+// ResetReadCounter zeroes the read counter.
+func (ts *testStorage) ResetReadCounter() {
+	ts.mu.Lock()
+	ts.readCnt = 0
+	ts.mu.Unlock()
+}
+
+// SetReadCounter selects which file types are counted by countRead.
+func (ts *testStorage) SetReadCounter(t storage.FileType) {
+	ts.mu.Lock()
+	ts.readCntEn = t
+	ts.mu.Unlock()
+}
+
+// countRead increments the read counter if type t is enabled.
+func (ts *testStorage) countRead(t storage.FileType) {
+	ts.mu.Lock()
+	if ts.readCntEn&t != 0 {
+		ts.readCnt++
+	}
+	ts.mu.Unlock()
+}
+
+// SetIgnoreOpenErr makes Open failures on files of type t be logged
+// instead of failing the test.
+// The field is guarded by ts.mu here for consistency with the other
+// setters: Open reads ignoreOpenErr while holding the same lock.
+func (ts *testStorage) SetIgnoreOpenErr(t storage.FileType) {
+	ts.mu.Lock()
+	ts.ignoreOpenErr = t
+	ts.mu.Unlock()
+}
+
+// Lock acquires the storage lock, logging the outcome; the returned
+// releaser is wrapped in tsLock.
+func (ts *testStorage) Lock() (r util.Releaser, err error) {
+	if r, err = ts.Storage.Lock(); err != nil {
+		ts.t.Logf("W: storage locking failed: %v", err)
+		return
+	}
+	ts.t.Log("I: storage locked")
+	r = tsLock{ts, r}
+	return
+}
+
+// Log records str to both the test log and the wrapped storage's log.
+func (ts *testStorage) Log(str string) {
+	ts.t.Log("L: " + str)
+	ts.Storage.Log(str)
+}
+
+// GetFile returns the wrapped storage's file, wrapped in tsFile so its
+// operations are tracked.
+func (ts *testStorage) GetFile(num uint64, t storage.FileType) storage.File {
+	return tsFile{ts, ts.Storage.GetFile(num, t)}
+}
+
+func (ts *testStorage) GetFiles(t storage.FileType) (ff []storage.File, err error) {
+	ff0, err := ts.Storage.GetFiles(t)
+	if err != nil {
+		ts.t.Errorf("E: get files failed: %v", err)
+		return
+	}
+	ff = make([]storage.File, len(ff0))
+	for i, f := range ff0 {
+		ff[i] = tsFile{ts, f}
+	}
+	ts.t.Logf("I: get files, type=0x%x count=%d", int(t), len(ff))
+	return
+}
+
+// GetManifest returns the current manifest file wrapped in tsFile.
+// A not-exist error is passed through silently; any other error fails
+// the test.
+func (ts *testStorage) GetManifest() (f storage.File, err error) {
+	f0, err := ts.Storage.GetManifest()
+	if err != nil {
+		if !os.IsNotExist(err) {
+			ts.t.Errorf("E: get manifest failed: %v", err)
+		}
+		return
+	}
+	f = tsFile{ts, f0}
+	ts.t.Logf("I: get manifest, num=%d", f.Num())
+	return
+}
+
+// SetManifest points the storage at a new manifest file. f must be a
+// tsFile of type storage.TypeManifest; anything else fails the test
+// and returns tsErrInvalidFile.
+func (ts *testStorage) SetManifest(f storage.File) error {
+	tf, ok := f.(tsFile)
+	switch {
+	case !ok:
+		ts.t.Error("E: set manifest failed: type assertion failed")
+		return tsErrInvalidFile
+	case tf.Type() != storage.TypeManifest:
+		ts.t.Errorf("E: set manifest failed: invalid file type: %s", tf.Type())
+		return tsErrInvalidFile
+	}
+	if err := ts.Storage.SetManifest(tf.File); err != nil {
+		ts.t.Errorf("E: set manifest failed: %v", err)
+		return err
+	}
+	ts.t.Logf("I: set manifest, num=%d", tf.Num())
+	return nil
+}
+
+// Close verifies that no files are left open, closes the wrapped
+// storage, and then runs closeFn (if set). The returned error is the
+// storage close error; a closeFn error only fails the test.
+func (ts *testStorage) Close() error {
+	ts.CloseCheck()
+	err := ts.Storage.Close()
+	if err != nil {
+		ts.t.Errorf("E: closing storage failed: %v", err)
+	} else {
+		ts.t.Log("I: storage closed")
+	}
+	if ts.closeFn != nil {
+		if err := ts.closeFn(); err != nil {
+			ts.t.Errorf("E: close function: %v", err)
+		}
+	}
+	return err
+}
+
+// CloseCheck fails the test if any files opened through this storage
+// are still open, listing each offender.
+func (ts *testStorage) CloseCheck() {
+	ts.mu.Lock()
+	if len(ts.opens) == 0 {
+		ts.t.Log("I: all files are closed")
+	} else {
+		ts.t.Errorf("E: %d files still open", len(ts.opens))
+		for x, writer := range ts.opens {
+			// Map keys pack file number and type: num<<typeShift | type.
+			num, tt := x>>typeShift, storage.FileType(x)&storage.TypeAll
+			ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer)
+		}
+	}
+	ts.mu.Unlock()
+}
+
+// newTestStorage creates a testStorage backed by either an on-disk
+// storage (when tsFS is set) or an in-memory one. The on-disk variant
+// claims a unique temp directory and installs a close function that
+// dumps the LOG files into the test log and removes the directory —
+// unless the test failed or tsKeepFS is set, in which case the DB is
+// preserved for inspection.
+func newTestStorage(t *testing.T) *testStorage {
+	var stor storage.Storage
+	var closeFn func() error
+	if tsFS {
+		for {
+			// Allocate a fresh storage number under the global lock.
+			tsMU.Lock()
+			num := tsNum
+			tsNum++
+			tsMU.Unlock()
+			tempdir := tsTempdir
+			if tempdir == "" {
+				tempdir = os.TempDir()
+			}
+			path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
+			// Only claim a path that does not already exist; otherwise
+			// loop and try the next number.
+			if _, err := os.Stat(path); err != nil {
+				stor, err = storage.OpenFile(path)
+				if err != nil {
+					t.Fatalf("F: cannot create storage: %v", err)
+				}
+				t.Logf("I: storage created: %s", path)
+				closeFn = func() error {
+					// Dump leveldb's own log files into the test output.
+					for _, name := range []string{"LOG.old", "LOG"} {
+						f, err := os.Open(filepath.Join(path, name))
+						if err != nil {
+							continue
+						}
+						if log, err := ioutil.ReadAll(f); err != nil {
+							t.Logf("---------------------- %s ----------------------", name)
+							t.Logf("cannot read log: %v", err)
+							t.Logf("---------------------- %s ----------------------", name)
+						} else if len(log) > 0 {
+							t.Logf("---------------------- %s ----------------------\n%s", name, string(log))
+							t.Logf("---------------------- %s ----------------------", name)
+						}
+						f.Close()
+					}
+					if t.Failed() {
+						t.Logf("testing failed, test DB preserved at %s", path)
+						return nil
+					}
+					if tsKeepFS {
+						return nil
+					}
+					return os.RemoveAll(path)
+				}
+
+				break
+			}
+		}
+	} else {
+		stor = storage.NewMemStorage()
+	}
+	ts := &testStorage{
+		t:              t,
+		Storage:        stor,
+		closeFn:        closeFn,
+		opens:          make(map[uint64]bool),
+		emuErrOnceMap:  make(map[uint64]uint),
+		emuRandErrProb: 0x999,
+		emuRandRand:    rand.New(rand.NewSource(0xfacedead)),
+	}
+	// The condition variable shares the storage mutex (used by the
+	// delayed-sync emulation).
+	ts.cond.L = &ts.mu
+	return ts
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
new file mode 100644
index 0000000..4604843
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
@@ -0,0 +1,529 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"fmt"
+	"sort"
+	"sync/atomic"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// tFile holds basic information about a table.
+type tFile struct {
+	file       storage.File
+	seekLeft   int32  // seeks remaining before a seek compaction is considered
+	size       uint64 // table file size in bytes
+	imin, imax iKey   // smallest and largest internal key in the table
+}
+
+// Reports whether ukey sorts strictly after this table's largest key.
+// A nil ukey means "no bound" and never sorts after.
+func (t *tFile) after(icmp *iComparer, ukey []byte) bool {
+	if ukey == nil {
+		return false
+	}
+	return icmp.uCompare(ukey, t.imax.ukey()) > 0
+}
+
+// Reports whether ukey sorts strictly before this table's smallest
+// key. A nil ukey means "no bound" and never sorts before.
+func (t *tFile) before(icmp *iComparer, ukey []byte) bool {
+	if ukey == nil {
+		return false
+	}
+	return icmp.uCompare(ukey, t.imin.ukey()) < 0
+}
+
+// Reports whether the user-key range [umin, umax] overlaps this
+// table's key range.
+func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool {
+	if t.after(icmp, umin) {
+		return false
+	}
+	return !t.before(icmp, umax)
+}
+
+// Consumes one seek and returns the number of seeks left.
+func (t *tFile) consumeSeek() int32 {
+	return atomic.AddInt32(&t.seekLeft, -1)
+}
+
+// Creates a new tFile for the given file, size and internal key range,
+// initializing its seek-compaction budget.
+func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile {
+	f := &tFile{
+		file: file,
+		size: size,
+		imin: imin,
+		imax: imax,
+	}
+
+	// We arrange to automatically compact this file after
+	// a certain number of seeks.  Let's assume:
+	//   (1) One seek costs 10ms
+	//   (2) Writing or reading 1MB costs 10ms (100MB/s)
+	//   (3) A compaction of 1MB does 25MB of IO:
+	//         1MB read from this level
+	//         10-12MB read from next level (boundaries may be misaligned)
+	//         10-12MB written to next level
+	// This implies that 25 seeks cost the same as the compaction
+	// of 1MB of data.  I.e., one seek costs approximately the
+	// same as the compaction of 40KB of data.  We are a little
+	// conservative and allow approximately one seek for every 16KB
+	// of data before triggering a compaction.
+	f.seekLeft = int32(size / 16384)
+	if f.seekLeft < 100 {
+		// Floor of 100 so tiny tables are not seek-compacted eagerly.
+		f.seekLeft = 100
+	}
+
+	return f
+}
+
+// tFiles hold multiple tFile.
+type tFiles []*tFile
+
+// Len and Swap implement part of sort.Interface; the Less method is
+// supplied by the tFilesSortByKey / tFilesSortByNum wrappers below.
+func (tf tFiles) Len() int      { return len(tf) }
+func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
+
+// nums renders the file numbers as a bracketed, comma-separated list,
+// e.g. "[ 1, 2, 3 ]" (used in log messages).
+func (tf tFiles) nums() string {
+	x := "[ "
+	for i, f := range tf {
+		if i > 0 {
+			x += ", "
+		}
+		x += fmt.Sprint(f.file.Num())
+	}
+	return x + " ]"
+}
+
+// Reports whether table i orders before table j by smallest internal
+// key, breaking ties by ascending file number. Used to sort by key in
+// ascending order.
+func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
+	a, b := tf[i], tf[j]
+	switch n := icmp.Compare(a.imin, b.imin); {
+	case n < 0:
+		return true
+	case n > 0:
+		return false
+	default:
+		return a.file.Num() < b.file.Num()
+	}
+}
+
+// Reports whether table i has a greater file number than table j.
+// Used to sort by file number in descending order.
+func (tf tFiles) lessByNum(i, j int) bool {
+	return tf[j].file.Num() < tf[i].file.Num()
+}
+
+// Sorts tables by key in ascending order. Ties on the smallest key
+// are broken by ascending file number (see lessByKey).
+func (tf tFiles) sortByKey(icmp *iComparer) {
+	sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
+}
+
+// Sorts tables by file number in descending order.
+func (tf tFiles) sortByNum() {
+	sort.Sort(&tFilesSortByNum{tFiles: tf})
+}
+
+// Returns sum of all tables size.
+func (tf tFiles) size() (sum uint64) {
+	for _, t := range tf {
+		sum += t.size
+	}
+	return sum
+}
+
+// Searches smallest index of tables whose its smallest
+// key is after or equal with given key.
+func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
+	return sort.Search(len(tf), func(i int) bool {
+		return icmp.Compare(tf[i].imin, ikey) >= 0
+	})
+}
+
+// Searches smallest index of tables whose its largest
+// key is after or equal with given key.
+func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int {
+	return sort.Search(len(tf), func(i int) bool {
+		return icmp.Compare(tf[i].imax, ikey) >= 0
+	})
+}
+
+// Returns true if given key range overlaps with one or more
+// tables key range. If unsorted is true then binary search will not be used.
+func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
+	if unsorted {
+		// Check against all files.
+		for _, t := range tf {
+			if t.overlaps(icmp, umin, umax) {
+				return true
+			}
+		}
+		return false
+	}
+
+	i := 0
+	if len(umin) > 0 {
+		// Find the earliest possible internal key for min.
+		// kMaxSeq/ktSeek makes this key sort before any real entry for
+		// the same user key.
+		i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek))
+	}
+	if i >= len(tf) {
+		// Beginning of range is after all files, so no overlap.
+		return false
+	}
+	// The candidate table ends at or after umin; overlap exists unless
+	// it starts entirely after umax.
+	return !tf[i].before(icmp, umax)
+}
+
+// Returns tables whose its key range overlaps with given key range.
+// Range will be expanded if ukey found hop across tables.
+// If overlapped is true then the search will be restarted if umax
+// expanded.
+// The dst content will be overwritten.
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
+	dst = dst[:0]
+	for i := 0; i < len(tf); {
+		t := tf[i]
+		if t.overlaps(icmp, umin, umax) {
+			if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
+				// Table extends below umin: widen the range and restart
+				// the scan from the beginning with an empty result.
+				umin = t.imin.ukey()
+				dst = dst[:0]
+				i = 0
+				continue
+			} else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
+				umax = t.imax.ukey()
+				// Restart search if it is overlapped.
+				if overlapped {
+					dst = dst[:0]
+					i = 0
+					continue
+				}
+			}
+
+			dst = append(dst, t)
+		}
+		i++
+	}
+
+	return dst
+}
+
+// Returns the combined key range (smallest imin, largest imax) of all
+// tables; zero-value keys when tf is empty.
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) {
+	if len(tf) == 0 {
+		return
+	}
+	imin, imax = tf[0].imin, tf[0].imax
+	for _, t := range tf[1:] {
+		if icmp.Compare(t.imin, imin) < 0 {
+			imin = t.imin
+		}
+		if icmp.Compare(t.imax, imax) > 0 {
+			imax = t.imax
+		}
+	}
+	return
+}
+
+// Creates an iterator indexer over the tables, optionally restricted
+// to the key range in slice. tf must be sorted by key so that the
+// searchMax/searchMin bounds select a contiguous run of tables.
+func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
+	if slice != nil {
+		var start, limit int
+		if slice.Start != nil {
+			start = tf.searchMax(icmp, iKey(slice.Start))
+		}
+		if slice.Limit != nil {
+			limit = tf.searchMin(icmp, iKey(slice.Limit))
+		} else {
+			limit = tf.Len()
+		}
+		tf = tf[start:limit]
+	}
+	return iterator.NewArrayIndexer(&tFilesArrayIndexer{
+		tFiles: tf,
+		tops:   tops,
+		icmp:   icmp,
+		slice:  slice,
+		ro:     ro,
+	})
+}
+
+// Tables iterator index: adapts a sorted tFiles slice to
+// iterator.NewArrayIndexer.
+type tFilesArrayIndexer struct {
+	tFiles
+	tops  *tOps
+	icmp  *iComparer
+	slice *util.Range
+	ro    *opt.ReadOptions
+}
+
+// Search returns the index of the first table whose largest key is
+// >= key.
+func (a *tFilesArrayIndexer) Search(key []byte) int {
+	return a.searchMax(a.icmp, iKey(key))
+}
+
+// Get returns an iterator over table i. Only the first and last
+// tables receive the range restriction; interior tables lie entirely
+// within the slice.
+func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
+	if i == 0 || i == a.Len()-1 {
+		return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
+	}
+	return a.tops.newIterator(a.tFiles[i], nil, a.ro)
+}
+
+// Helper type for sortByKey: completes sort.Interface (Len/Swap come
+// from the embedded tFiles) with a key-ordered Less.
+type tFilesSortByKey struct {
+	tFiles
+	icmp *iComparer
+}
+
+func (x *tFilesSortByKey) Less(i, j int) bool {
+	return x.lessByKey(x.icmp, i, j)
+}
+
+// Helper type for sortByNum: completes sort.Interface with a
+// descending-file-number Less.
+type tFilesSortByNum struct {
+	tFiles
+}
+
+func (x *tFilesSortByNum) Less(i, j int) bool {
+	return x.lessByNum(i, j)
+}
+
+// Table operations: creating, opening, reading and removing table
+// files, with caching of open readers and blocks.
+type tOps struct {
+	s      *session
+	noSync bool         // skip the Sync in tWriter.finish when set
+	cache  *cache.Cache // open table readers, keyed by file number
+	bcache *cache.Cache // block cache (nil when disabled)
+	bpool  *util.BufferPool
+}
+
+// Creates an empty table and returns a table writer for it. The file
+// number is allocated from the session.
+func (t *tOps) create() (*tWriter, error) {
+	file := t.s.getTableFile(t.s.allocFileNum())
+	fw, err := file.Create()
+	if err != nil {
+		return nil, err
+	}
+	return &tWriter{
+		t:    t,
+		file: file,
+		w:    fw,
+		tw:   table.NewWriter(fw, t.s.o.Options),
+	}, nil
+}
+
+// Builds a table from the src iterator, returning the resulting tFile
+// and the number of entries written. On any error the partially
+// written table is dropped.
+func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
+	w, err := t.create()
+	if err != nil {
+		return
+	}
+
+	// Drop the table (and reclaim its file number) if anything below
+	// fails; err is the named return, so this sees the final value.
+	defer func() {
+		if err != nil {
+			w.drop()
+		}
+	}()
+
+	for src.Next() {
+		err = w.append(src.Key(), src.Value())
+		if err != nil {
+			return
+		}
+	}
+	err = src.Error()
+	if err != nil {
+		return
+	}
+
+	n = w.tw.EntriesLen()
+	f, err = w.finish()
+	return
+}
+
+// Opens table. It returns a cache handle, which should
+// be released after use. The table reader is built at most once and
+// then served from the cache, namespaced by file number.
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
+	num := f.file.Num()
+	ch = t.cache.Get(0, num, func() (size int, value cache.Value) {
+		// This closure runs only on a cache miss; it assigns the outer
+		// err, which is why open's error can come from inside Get.
+		var r storage.Reader
+		r, err = f.file.Open()
+		if err != nil {
+			return 0, nil
+		}
+
+		var bcache *cache.CacheGetter
+		if t.bcache != nil {
+			bcache = &cache.CacheGetter{Cache: t.bcache, NS: num}
+		}
+
+		var tr *table.Reader
+		tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options)
+		if err != nil {
+			r.Close()
+			return 0, nil
+		}
+		return 1, tr
+
+	})
+	// A nil handle without an error means the cache itself is closed.
+	if ch == nil && err == nil {
+		err = ErrClosed
+	}
+	return
+}
+
+// Finds key/value pair whose key is greater than or equal to the
+// given key.
+func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
+	ch, err := t.open(f)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer ch.Release()
+	return ch.Value().(*table.Reader).Find(key, true, ro)
+}
+
+// Finds key that is greater than or equal to the given key.
+func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
+	ch, err := t.open(f)
+	if err != nil {
+		return nil, err
+	}
+	defer ch.Release()
+	return ch.Value().(*table.Reader).FindKey(key, true, ro)
+}
+
+// Returns the approximate offset within table f at which the given
+// key would reside.
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
+	ch, err := t.open(f)
+	if err != nil {
+		return
+	}
+	defer ch.Release()
+	// Renamed offset_ -> off: trailing-underscore identifiers are not
+	// idiomatic Go.
+	off, err := ch.Value().(*table.Reader).OffsetOf(key)
+	return uint64(off), err
+}
+
+// Creates an iterator from the given table. Errors are reported via
+// an empty iterator; the cache handle is released when the iterator
+// is released.
+func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	ch, err := t.open(f)
+	if err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+	iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
+	iter.SetReleaser(ch)
+	return iter
+}
+
+// Removes table from persistent storage. The actual file removal is
+// deferred by the cache until no one uses the table anymore.
+func (t *tOps) remove(f *tFile) {
+	num := f.file.Num()
+	t.cache.Delete(0, num, func() {
+		if err := f.file.Remove(); err != nil {
+			t.s.logf("table@remove removing @%d %q", num, err)
+		} else {
+			t.s.logf("table@remove removed @%d", num)
+		}
+		// Drop any cached blocks belonging to the removed table.
+		if t.bcache != nil {
+			t.bcache.EvictNS(num)
+		}
+	})
+}
+
+// Closes the table ops instance. It will close all tables,
+// regardless of whether they are still in use.
+func (t *tOps) close() {
+	t.bpool.Close()
+	t.cache.Close()
+	if t.bcache != nil {
+		t.bcache.Close()
+	}
+}
+
+// Creates a new initialized table ops instance for the session,
+// wiring up the open-file cache, the optional block cache, and the
+// optional buffer pool according to the session options.
+func newTableOps(s *session) *tOps {
+	var (
+		cacher cache.Cacher
+		bcache *cache.Cache
+		bpool  *util.BufferPool
+	)
+	if s.o.GetOpenFilesCacheCapacity() > 0 {
+		cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
+	}
+	if !s.o.GetDisableBlockCache() {
+		var bcacher cache.Cacher
+		if s.o.GetBlockCacheCapacity() > 0 {
+			bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
+		}
+		bcache = cache.NewCache(bcacher)
+	}
+	if !s.o.GetDisableBufferPool() {
+		// +5 leaves room for the per-block trailer.
+		bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
+	}
+	return &tOps{
+		s:      s,
+		noSync: s.o.GetNoSync(),
+		cache:  cache.NewCache(cacher),
+		bcache: bcache,
+		bpool:  bpool,
+	}
+}
+
+// tWriter wraps the table writer. It keeps track of the file
+// descriptor and the added key range.
+type tWriter struct {
+	t *tOps
+
+	file storage.File
+	w    storage.Writer
+	tw   *table.Writer
+
+	// First and last keys appended so far; first == nil means no key
+	// has been appended yet.
+	first, last []byte
+}
+
+// Append key/value pair to the table. Keys must be appended in
+// sorted order; first/last are updated to track the written range.
+func (w *tWriter) append(key, value []byte) error {
+	if w.first == nil {
+		w.first = append([]byte{}, key...)
+	}
+	w.last = append(w.last[:0], key...)
+	return w.tw.Append(key, value)
+}
+
+// Returns true if the table is empty (nothing appended yet).
+func (w *tWriter) empty() bool {
+	return w.first == nil
+}
+
+// Closes the storage.Writer, if it is still open; safe to call more
+// than once.
+func (w *tWriter) close() {
+	if w.w == nil {
+		return
+	}
+	w.w.Close()
+	w.w = nil
+}
+
+// Finalizes the table, syncing it to disk unless noSync is set, and
+// returns the resulting tFile. The underlying writer is closed in all
+// cases.
+func (w *tWriter) finish() (f *tFile, err error) {
+	defer w.close()
+	err = w.tw.Close()
+	if err != nil {
+		return
+	}
+	if !w.t.noSync {
+		err = w.w.Sync()
+		if err != nil {
+			return
+		}
+	}
+	f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
+	return
+}
+
+// Drops the table: closes and removes the file and returns its file
+// number to the session for reuse.
+func (w *tWriter) drop() {
+	w.close()
+	w.file.Remove()
+	w.t.s.reuseFileNum(w.file.Num())
+	w.file = nil
+	w.tw = nil
+	w.first = nil
+	w.last = nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
new file mode 100644
index 0000000..632278d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
@@ -0,0 +1,139 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// blockTesting bundles a Reader with one of its blocks so the generic
+// key/value test helpers can iterate the block.
+type blockTesting struct {
+	tr *Reader
+	b  *block
+}
+
+// TestNewIterator returns an iterator over the block, restricted to
+// the given range.
+func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator {
+	return t.tr.newBlockIter(t.b, nil, slice, false)
+}
+
+// Ginkgo suite exercising block reads and iteration across varying
+// restart intervals, growing key sets, and out-of-bound slice ranges.
+var _ = testutil.Defer(func() {
+	Describe("Block", func() {
+		// Build writes kv into a fresh block and reopens it for reading.
+		Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting {
+			// Building the block.
+			bw := &blockWriter{
+				restartInterval: restartInterval,
+				scratch:         make([]byte, 30),
+			}
+			kv.Iterate(func(i int, key, value []byte) {
+				bw.append(key, value)
+			})
+			bw.finish()
+
+			// Opening the block.
+			data := bw.buf.Bytes()
+			// The last 4 bytes hold the restart-point count.
+			restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
+			return &blockTesting{
+				tr: &Reader{cmp: comparer.DefaultComparer},
+				b: &block{
+					data:           data,
+					restartsLen:    restartsLen,
+					restartsOffset: len(data) - (restartsLen+1)*4,
+				},
+			}
+		}
+
+		Describe("read test", func() {
+			for restartInterval := 1; restartInterval <= 5; restartInterval++ {
+				Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
+					kv := &testutil.KeyValue{}
+					Text := func() string {
+						return fmt.Sprintf("and %d keys", kv.Len())
+					}
+
+					Test := func() {
+						// Make block.
+						br := Build(kv, restartInterval)
+						// Do testing.
+						testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil)
+					}
+
+					// Re-run the full test after each key addition so every
+					// prefix of the key set is covered.
+					Describe(Text(), Test)
+
+					kv.PutString("", "empty")
+					Describe(Text(), Test)
+
+					kv.PutString("a1", "foo")
+					Describe(Text(), Test)
+
+					kv.PutString("a2", "v")
+					Describe(Text(), Test)
+
+					kv.PutString("a3qqwrkks", "hello")
+					Describe(Text(), Test)
+
+					kv.PutString("a4", "bar")
+					Describe(Text(), Test)
+
+					kv.PutString("a5111111", "v5")
+					kv.PutString("a6", "")
+					kv.PutString("a7", "v7")
+					kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8")
+					kv.PutString("b", "v9")
+					kv.PutString("c9", "v9")
+					kv.PutString("c91", "v9")
+					kv.PutString("d0", "v9")
+					Describe(Text(), Test)
+				})
+			}
+		})
+
+		Describe("out-of-bound slice test", func() {
+			kv := &testutil.KeyValue{}
+			kv.PutString("k1", "v1")
+			kv.PutString("k2", "v2")
+			kv.PutString("k3abcdefgg", "v3")
+			kv.PutString("k4", "v4")
+			kv.PutString("k5", "v5")
+			for restartInterval := 1; restartInterval <= 5; restartInterval++ {
+				Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
+					// Make block.
+					bt := Build(kv, restartInterval)
+
+					// Slices whose bounds fall outside the stored keys must
+					// still yield the full key set.
+					Test := func(r *util.Range) func(done Done) {
+						return func(done Done) {
+							iter := bt.TestNewIterator(r)
+							Expect(iter.Error()).ShouldNot(HaveOccurred())
+
+							t := testutil.IteratorTesting{
+								KeyValue: kv.Clone(),
+								Iter:     iter,
+							}
+
+							testutil.DoIteratorTesting(&t)
+							iter.Release()
+							done <- true
+						}
+					}
+
+					It("Should do iterations and seeks correctly #0",
+						Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0)
+
+					It("Should do iterations and seeks correctly #1",
+						Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0)
+				})
+			}
+		})
+	})
+})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
new file mode 100644
index 0000000..6b7f050
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -0,0 +1,1106 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/golang/snappy"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Sentinel errors returned by the table reader.
+var (
+	ErrNotFound       = errors.ErrNotFound
+	ErrReaderReleased = errors.New("leveldb/table: reader released")
+	ErrIterReleased   = errors.New("leveldb/table: iterator released")
+)
+
+// ErrCorrupted describes a corruption detected in a table file,
+// locating it by position and size within the file.
+type ErrCorrupted struct {
+	Pos    int64
+	Size   int64
+	Kind   string
+	Reason string
+}
+
+// Error implements the error interface.
+func (e *ErrCorrupted) Error() string {
+	return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason)
+}
+
+// max returns the larger of x and y.
+func max(x, y int) int {
+	if y > x {
+		return y
+	}
+	return x
+}
+
+// block is a decoded data block: entries followed by an array of
+// restart-point offsets and a 4-byte restart count.
+type block struct {
+	bpool          *util.BufferPool // pool that data is returned to on Release
+	bh             blockHandle
+	data           []byte
+	restartsLen    int // number of restart points
+	restartsOffset int // byte offset where the restart array begins
+}
+
+// seek binary-searches the restart points in [rstart, rlimit) for the
+// last restart whose key is <= key, returning its index and byte
+// offset. If even the first restart key is greater, rstart itself is
+// returned so iteration can begin at the range start.
+func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) {
+	index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+		offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
+		offset += 1                                 // shared always zero, since this is a restart point
+		v1, n1 := binary.Uvarint(b.data[offset:])   // key length
+		_, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
+		m := offset + n1 + n2
+		return cmp.Compare(b.data[m:m+int(v1)], key) > 0
+	}) + rstart - 1
+	if index < rstart {
+		// The smallest key is greater-than key sought.
+		index = rstart
+	}
+	offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+	return
+}
+
+// restartIndex binary-searches the restart points in [rstart, rlimit)
+// for the last restart whose offset is <= offset.
+func (b *block) restartIndex(rstart, rlimit, offset int) int {
+	return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+		return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset
+	}) + rstart - 1
+}
+
+// restartOffset returns the byte offset stored at restart point index.
+func (b *block) restartOffset(index int) int {
+	return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+}
+
+// entry decodes the entry at offset: the unshared key suffix, the
+// value, the shared-prefix length, and the total encoded size n.
+// n == 0 (with key/value nil) signals the end of the entries region.
+func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) {
+	if offset >= b.restartsOffset {
+		if offset != b.restartsOffset {
+			err = &ErrCorrupted{Reason: "entries offset not aligned"}
+		}
+		return
+	}
+	v0, n0 := binary.Uvarint(b.data[offset:])       // Shared prefix length
+	v1, n1 := binary.Uvarint(b.data[offset+n0:])    // Key length
+	v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length
+	m := n0 + n1 + n2
+	n = m + int(v1) + int(v2)
+	if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset {
+		err = &ErrCorrupted{Reason: "entries corrupted"}
+		return
+	}
+	key = b.data[offset+m : offset+m+int(v1)]
+	value = b.data[offset+m+int(v1) : offset+n]
+	nShared = int(v0)
+	return
+}
+
+// Release returns the block's buffer to its pool and clears the
+// references so the block cannot be used afterwards.
+func (b *block) Release() {
+	b.bpool.Put(b.data)
+	b.bpool = nil
+	b.data = nil
+}
+
+// dir is the iterator direction / lifecycle state.
+type dir int
+
+const (
+	dirReleased dir = iota - 1 // iterator released; all ops error
+	dirSOI                     // before the first entry
+	dirEOI                     // after the last entry
+	dirBackward                // last movement was Prev
+	dirForward                 // last movement was Next
+)
+
+// blockIter iterates the entries of a single block, within an
+// optional restart-index / offset sub-range.
+type blockIter struct {
+	tr            *Reader
+	block         *block
+	blockReleaser util.Releaser
+	releaser      util.Releaser
+	key, value    []byte
+	offset        int
+	// Previous offset, only filled by Next.
+	prevOffset   int
+	prevNode     []int  // stack of decoded positions used by Prev
+	prevKeys     []byte // backing storage for keys saved in prevNode
+	restartIndex int
+	// Iterator direction.
+	dir dir
+	// Restart index slice range.
+	riStart int
+	riLimit int
+	// Offset slice range.
+	offsetStart     int
+	offsetRealStart int
+	offsetLimit     int
+	// Error.
+	err error
+}
+
+// sErr records a sticky error and drops all iterator state.
+func (i *blockIter) sErr(err error) {
+	i.err = err
+	i.key = nil
+	i.value = nil
+	i.prevNode = nil
+	i.prevKeys = nil
+}
+
+// reset rewinds the iterator to the start of its range.
+func (i *blockIter) reset() {
+	if i.dir == dirBackward {
+		i.prevNode = i.prevNode[:0]
+		i.prevKeys = i.prevKeys[:0]
+	}
+	i.restartIndex = i.riStart
+	i.offset = i.offsetStart
+	i.dir = dirSOI
+	i.key = i.key[:0]
+	i.value = nil
+}
+
+// isFirst reports whether the iterator is positioned on the first
+// entry of its range.
+func (i *blockIter) isFirst() bool {
+	switch i.dir {
+	case dirForward:
+		return i.prevOffset == i.offsetRealStart
+	case dirBackward:
+		return len(i.prevNode) == 1 && i.restartIndex == i.riStart
+	}
+	return false
+}
+
+// isLast reports whether the iterator is positioned on the last entry
+// of its range.
+func (i *blockIter) isLast() bool {
+	switch i.dir {
+	case dirForward, dirBackward:
+		return i.offset == i.offsetLimit
+	}
+	return false
+}
+
+// First moves to the first entry by rewinding to start-of-iteration
+// and stepping forward once.
+func (i *blockIter) First() bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.dir == dirBackward {
+		i.prevNode = i.prevNode[:0]
+		i.prevKeys = i.prevKeys[:0]
+	}
+	i.dir = dirSOI
+	return i.Next()
+}
+
+// Last moves to the last entry by jumping to end-of-iteration and
+// stepping backward once.
+func (i *blockIter) Last() bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.dir == dirBackward {
+		i.prevNode = i.prevNode[:0]
+		i.prevKeys = i.prevKeys[:0]
+	}
+	i.dir = dirEOI
+	return i.Prev()
+}
+
+// Seek positions the iterator at the first entry whose key is >= key,
+// returning false if no such entry exists. It binary-searches the
+// restart points, then scans forward entry by entry.
+func (i *blockIter) Seek(key []byte) bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key)
+	if err != nil {
+		i.sErr(err)
+		return false
+	}
+	i.restartIndex = ri
+	// Never seek before the slice's real start offset.
+	i.offset = max(i.offsetStart, offset)
+	if i.dir == dirSOI || i.dir == dirEOI {
+		i.dir = dirForward
+	}
+	// Linear scan from the restart point to the first key >= target.
+	for i.Next() {
+		if i.tr.cmp.Compare(i.key, key) >= 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// Next advances to the next entry, decoding it into i.key/i.value.
+// Keys are prefix-compressed, so each entry's key is rebuilt by
+// keeping nShared bytes of the previous key and appending the suffix.
+func (i *blockIter) Next() bool {
+	if i.dir == dirEOI || i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.dir == dirSOI {
+		i.restartIndex = i.riStart
+		i.offset = i.offsetStart
+	} else if i.dir == dirBackward {
+		i.prevNode = i.prevNode[:0]
+		i.prevKeys = i.prevKeys[:0]
+	}
+	// Skip entries between the restart point and the slice's real
+	// start, decoding them only to maintain the prefix-compressed key.
+	for i.offset < i.offsetRealStart {
+		key, value, nShared, n, err := i.block.entry(i.offset)
+		if err != nil {
+			i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+			return false
+		}
+		if n == 0 {
+			i.dir = dirEOI
+			return false
+		}
+		i.key = append(i.key[:nShared], key...)
+		i.value = value
+		i.offset += n
+	}
+	if i.offset >= i.offsetLimit {
+		i.dir = dirEOI
+		if i.offset != i.offsetLimit {
+			i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+		}
+		return false
+	}
+	key, value, nShared, n, err := i.block.entry(i.offset)
+	if err != nil {
+		i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+		return false
+	}
+	if n == 0 {
+		i.dir = dirEOI
+		return false
+	}
+	i.key = append(i.key[:nShared], key...)
+	i.value = value
+	i.prevOffset = i.offset
+	i.offset += n
+	i.dir = dirForward
+	return true
+}
+
+// Prev moves the iterator to the previous entry, reporting whether such
+// an entry exists. Entries are prefix-compressed and only decodable
+// forward, so backward iteration re-decodes the current restart range
+// forward once and caches decoded positions in prevNode/prevKeys.
+func (i *blockIter) Prev() bool {
+	if i.dir == dirSOI || i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	var ri int
+	if i.dir == dirForward {
+		// Change direction.
+		i.offset = i.prevOffset
+		if i.offset == i.offsetRealStart {
+			i.dir = dirSOI
+			return false
+		}
+		ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset)
+		i.dir = dirBackward
+	} else if i.dir == dirEOI {
+		// At the end of iterator.
+		i.restartIndex = i.riLimit
+		i.offset = i.offsetLimit
+		if i.offset == i.offsetRealStart {
+			i.dir = dirSOI
+			return false
+		}
+		ri = i.riLimit - 1
+		i.dir = dirBackward
+	} else if len(i.prevNode) == 1 {
+		// This is the end of a restart range.
+		i.offset = i.prevNode[0]
+		i.prevNode = i.prevNode[:0]
+		if i.restartIndex == i.riStart {
+			i.dir = dirSOI
+			return false
+		}
+		i.restartIndex--
+		ri = i.restartIndex
+	} else {
+		// In the middle of restart range, get from cache.
+		n := len(i.prevNode) - 3
+		node := i.prevNode[n:]
+		i.prevNode = i.prevNode[:n]
+		// Get the key.
+		ko := node[0]
+		i.key = append(i.key[:0], i.prevKeys[ko:]...)
+		i.prevKeys = i.prevKeys[:ko]
+		// Get the value.
+		vo := node[1]
+		vl := vo + node[2]
+		i.value = i.block.data[vo:vl]
+		i.offset = vl
+		return true
+	}
+	// Build entries cache.
+	i.key = i.key[:0]
+	i.value = nil
+	offset := i.block.restartOffset(ri)
+	if offset == i.offset {
+		// The target sits exactly on a restart point; decode from the
+		// previous restart range instead.
+		ri -= 1
+		if ri < 0 {
+			i.dir = dirSOI
+			return false
+		}
+		offset = i.block.restartOffset(ri)
+	}
+	i.prevNode = append(i.prevNode, offset)
+	for {
+		key, value, nShared, n, err := i.block.entry(offset)
+		if err != nil {
+			i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+			return false
+		}
+		if offset >= i.offsetRealStart {
+			if i.value != nil {
+				// Appends 3 variables:
+				// 1. Previous keys offset
+				// 2. Value offset in the data block
+				// 3. Value length
+				i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value))
+				i.prevKeys = append(i.prevKeys, i.key...)
+			}
+			i.value = value
+		}
+		i.key = append(i.key[:nShared], key...)
+		offset += n
+		// Stop if target offset reached.
+		if offset >= i.offset {
+			if offset != i.offset {
+				i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+				return false
+			}
+
+			break
+		}
+	}
+	i.restartIndex = ri
+	i.offset = offset
+	return true
+}
+
+// Key returns the current entry's key, or nil when the iterator is not
+// positioned at an entry.
+func (i *blockIter) Key() []byte {
+	if i.err == nil && i.dir > dirEOI {
+		return i.key
+	}
+	return nil
+}
+
+// Value returns the current entry's value, or nil when the iterator is
+// not positioned at an entry.
+func (i *blockIter) Value() []byte {
+	if i.err == nil && i.dir > dirEOI {
+		return i.value
+	}
+	return nil
+}
+
+// Release frees the iterator's references and invokes the block releaser
+// and any user-supplied releaser. The call is idempotent; after the
+// first call the iterator only reports ErrIterReleased.
+func (i *blockIter) Release() {
+	if i.dir != dirReleased {
+		i.tr = nil
+		i.block = nil
+		i.prevNode = nil
+		i.prevKeys = nil
+		i.key = nil
+		i.value = nil
+		i.dir = dirReleased
+		// Release the block's cache handle first, then the
+		// user-supplied releaser.
+		if i.blockReleaser != nil {
+			i.blockReleaser.Release()
+			i.blockReleaser = nil
+		}
+		if i.releaser != nil {
+			i.releaser.Release()
+			i.releaser = nil
+		}
+	}
+}
+
+// SetReleaser attaches a releaser that fires when the iterator is
+// released. Setting a second releaser, or setting one on an already
+// released iterator, panics.
+func (i *blockIter) SetReleaser(releaser util.Releaser) {
+	switch {
+	case i.dir == dirReleased:
+		panic(util.ErrReleased)
+	case i.releaser != nil && releaser != nil:
+		panic(util.ErrHasReleaser)
+	}
+	i.releaser = releaser
+}
+
+// Valid reports whether the iterator is positioned at a valid entry.
+func (i *blockIter) Valid() bool {
+	switch i.dir {
+	case dirForward, dirBackward:
+		return i.err == nil
+	}
+	return false
+}
+
+// Error returns the first error the iterator encountered, if any.
+func (i *blockIter) Error() error {
+	return i.err
+}
+
+// filterBlock is a decoded filter block: a sequence of filter data
+// payloads followed by a trailer of per-filter offsets.
+type filterBlock struct {
+	bpool      *util.BufferPool // pool that owns data; returned on Release
+	data       []byte           // raw filter block contents
+	oOffset    int              // offset of the data-offsets array within data
+	baseLg     uint             // log2 of bytes of file span covered per filter
+	filtersNum int              // number of filter data entries
+}
+
+// contains reports whether the filter covering the data block at the
+// given file offset may contain key. It returns true ("maybe") when the
+// stored offsets are out of range, so a malformed filter never produces
+// a false negative.
+func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool {
+	// One filter per 1<<baseLg bytes of file span.
+	i := int(offset >> b.baseLg)
+	if i < b.filtersNum {
+		o := b.data[b.oOffset+i*4:]
+		n := int(binary.LittleEndian.Uint32(o))
+		m := int(binary.LittleEndian.Uint32(o[4:]))
+		if n < m && m <= b.oOffset {
+			return filter.Contains(b.data[n:m], key)
+		} else if n == m {
+			// Empty filter data: no keys fall in this range.
+			return false
+		}
+	}
+	return true
+}
+
+// Release returns the filter data buffer to the pool. The filterBlock
+// must not be used afterwards.
+func (b *filterBlock) Release() {
+	b.bpool.Put(b.data)
+	b.bpool = nil
+	b.data = nil
+}
+
+// indexIter iterates an index block; Get materializes the data-block
+// iterator referenced by the current index entry.
+type indexIter struct {
+	*blockIter
+	tr    *Reader
+	slice *util.Range // key range restriction passed down to data blocks
+	// Options
+	fillCache bool // whether data blocks read via Get populate the cache
+}
+
+// Get returns an iterator over the data block referenced by the current
+// index entry, or nil when the index iterator is not positioned at one.
+func (i *indexIter) Get() iterator.Iterator {
+	value := i.Value()
+	if value == nil {
+		return nil
+	}
+	dataBH, n := decodeBlockHandle(value)
+	if n == 0 {
+		return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle"))
+	}
+
+	var slice *util.Range
+	// Only the first and last data blocks of the range can contain keys
+	// outside the requested slice, so only those need slicing.
+	if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
+		slice = i.slice
+	}
+	return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache)
+}
+
+// Reader is a table reader. It is goroutine-safe; mu guards err and the
+// release state.
+type Reader struct {
+	mu     sync.RWMutex
+	fi     *storage.FileInfo  // file identity, used in corruption errors
+	reader io.ReaderAt        // underlying table file
+	cache  *cache.CacheGetter // optional shared block cache
+	err    error              // sticky error; set on corruption or release
+	bpool  *util.BufferPool   // optional buffer pool for block reads
+	// Options
+	o              *opt.Options
+	cmp            comparer.Comparer
+	filter         filter.Filter
+	verifyChecksum bool
+
+	dataEnd                   int64 // file offset where data blocks end
+	metaBH, indexBH, filterBH blockHandle
+	indexBlock                *block       // locally cached when no shared cache
+	filterBlock               *filterBlock // locally cached when no shared cache
+}
+
+// blockKind classifies the block at bh's offset for error reporting.
+func (r *Reader) blockKind(bh blockHandle) string {
+	if bh.offset == r.metaBH.offset {
+		return "meta-block"
+	}
+	if bh.offset == r.indexBH.offset {
+		return "index-block"
+	}
+	if bh.offset == r.filterBH.offset && r.filterBH.length > 0 {
+		return "filter-block"
+	}
+	return "data-block"
+}
+
+// newErrCorrupted builds a corruption error annotated with this reader's
+// file info and the offending position/size/kind.
+func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
+	return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
+}
+
+// newErrCorruptedBH is newErrCorrupted with position, size and kind
+// derived from a block handle.
+func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error {
+	return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason)
+}
+
+// fixErrCorruptedBH fills in the block position, size and kind on a bare
+// *ErrCorrupted and wraps it with the file info; other errors pass
+// through unchanged.
+func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error {
+	if cerr, ok := err.(*ErrCorrupted); ok {
+		cerr.Pos = int64(bh.offset)
+		cerr.Size = int64(bh.length)
+		cerr.Kind = r.blockKind(bh)
+		return &errors.ErrCorrupted{File: r.fi, Err: cerr}
+	}
+	return err
+}
+
+// readRawBlock reads the raw (possibly compressed) block described by
+// bh, verifies its checksum when requested, and returns the decompressed
+// payload in a buffer obtained from the reader's buffer pool. On error,
+// every pooled buffer is returned to the pool before returning.
+func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) {
+	data := r.bpool.Get(int(bh.length + blockTrailerLen))
+	if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
+		// Fix: return the buffer to the pool on the read-error path;
+		// previously it was leaked.
+		r.bpool.Put(data)
+		return nil, err
+	}
+
+	if verifyChecksum {
+		n := bh.length + 1 // payload plus the 1-byte compression type
+		checksum0 := binary.LittleEndian.Uint32(data[n:])
+		checksum1 := util.NewCRC(data[:n]).Value()
+		if checksum0 != checksum1 {
+			r.bpool.Put(data)
+			return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1))
+		}
+	}
+
+	switch data[bh.length] {
+	case blockTypeNoCompression:
+		data = data[:bh.length]
+	case blockTypeSnappyCompression:
+		decLen, err := snappy.DecodedLen(data[:bh.length])
+		if err != nil {
+			// Fix: return the buffer to the pool on the decode-length
+			// error path; previously it was leaked.
+			r.bpool.Put(data)
+			return nil, r.newErrCorruptedBH(bh, err.Error())
+		}
+		decData := r.bpool.Get(decLen)
+		decData, err = snappy.Decode(decData, data[:bh.length])
+		r.bpool.Put(data)
+		if err != nil {
+			r.bpool.Put(decData)
+			return nil, r.newErrCorruptedBH(bh, err.Error())
+		}
+		data = decData
+	default:
+		r.bpool.Put(data)
+		return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length]))
+	}
+	return data, nil
+}
+
+// readBlock reads the block at bh and parses its restart-point trailer
+// into a *block ready for iteration.
+func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) {
+	data, err := r.readRawBlock(bh, verifyChecksum)
+	if err != nil {
+		return nil, err
+	}
+	// The final 4 bytes hold the restart-point count; the restart array
+	// itself sits immediately before it.
+	restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
+	b := &block{
+		bpool:          r.bpool,
+		bh:             bh,
+		data:           data,
+		restartsLen:    restartsLen,
+		restartsOffset: len(data) - (restartsLen+1)*4,
+	}
+	return b, nil
+}
+
+// readBlockCached returns the block at bh, going through the shared
+// block cache when one is configured. The returned releaser must be
+// released once the caller is done with the block; without a cache the
+// block itself acts as its releaser.
+func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
+	if r.cache != nil {
+		var (
+			err error
+			ch  *cache.Handle
+		)
+		if fillCache {
+			// The setter closure records a read failure in err; a nil
+			// handle together with non-nil err below distinguishes a
+			// read failure from a plain cache miss.
+			ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+				var b *block
+				b, err = r.readBlock(bh, verifyChecksum)
+				if err != nil {
+					return 0, nil
+				}
+				return cap(b.data), b
+			})
+		} else {
+			ch = r.cache.Get(bh.offset, nil)
+		}
+		if ch != nil {
+			b, ok := ch.Value().(*block)
+			if !ok {
+				ch.Release()
+				return nil, nil, errors.New("leveldb/table: inconsistent block type")
+			}
+			return b, ch, err
+		} else if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// No cache (or cache miss without fill): read directly.
+	b, err := r.readBlock(bh, verifyChecksum)
+	return b, b, err
+}
+
+// readFilterBlock reads and parses the filter block at bh, validating
+// its trailer (a 4-byte data-offsets offset plus a 1-byte base Lg).
+func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
+	data, err := r.readRawBlock(bh, true)
+	if err != nil {
+		return nil, err
+	}
+	n := len(data)
+	// Minimum size is the 5-byte trailer itself.
+	if n < 5 {
+		return nil, r.newErrCorruptedBH(bh, "too short")
+	}
+	m := n - 5
+	oOffset := int(binary.LittleEndian.Uint32(data[m:]))
+	if oOffset > m {
+		return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset")
+	}
+	b := &filterBlock{
+		bpool:      r.bpool,
+		data:       data,
+		oOffset:    oOffset,
+		baseLg:     uint(data[n-1]),
+		filtersNum: (m - oOffset) / 4,
+	}
+	return b, nil
+}
+
+// readFilterBlockCached returns the filter block at bh, going through
+// the shared block cache when one is configured. The returned releaser
+// must be released once the caller is done; without a cache the filter
+// block itself acts as its releaser.
+func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
+	if r.cache != nil {
+		var (
+			err error
+			ch  *cache.Handle
+		)
+		if fillCache {
+			// err is set by the closure when the underlying read fails;
+			// see readBlockCached for the miss-vs-failure distinction.
+			ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+				var b *filterBlock
+				b, err = r.readFilterBlock(bh)
+				if err != nil {
+					return 0, nil
+				}
+				return cap(b.data), b
+			})
+		} else {
+			ch = r.cache.Get(bh.offset, nil)
+		}
+		if ch != nil {
+			b, ok := ch.Value().(*filterBlock)
+			if !ok {
+				ch.Release()
+				return nil, nil, errors.New("leveldb/table: inconsistent block type")
+			}
+			return b, ch, err
+		} else if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	b, err := r.readFilterBlock(bh)
+	return b, b, err
+}
+
+// getIndexBlock returns the table's index block, preferring the locally
+// cached copy and falling back to a (possibly cached) read.
+func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) {
+	if r.indexBlock != nil {
+		return r.indexBlock, util.NoopReleaser{}, nil
+	}
+	return r.readBlockCached(r.indexBH, true, fillCache)
+}
+
+// getFilterBlock returns the table's filter block, preferring the
+// locally cached copy and falling back to a (possibly cached) read.
+func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) {
+	if r.filterBlock != nil {
+		return r.filterBlock, util.NoopReleaser{}, nil
+	}
+	return r.readFilterBlockCached(r.filterBH, fillCache)
+}
+
+// newBlockIter creates an iterator over b, optionally restricted to the
+// key range in slice. When inclLimit is true the entry matching the
+// limit key is itself included (used for index blocks, whose keys are
+// separators rather than actual data keys).
+func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
+	bi := &blockIter{
+		tr:            r,
+		block:         b,
+		blockReleaser: bReleaser,
+		// Valid key should never be nil.
+		key:             make([]byte, 0),
+		dir:             dirSOI,
+		riStart:         0,
+		riLimit:         b.restartsLen,
+		offsetStart:     0,
+		offsetRealStart: 0,
+		offsetLimit:     b.restartsOffset,
+	}
+	if slice != nil {
+		// Narrow the iteration window by seeking to the slice bounds.
+		if slice.Start != nil {
+			if bi.Seek(slice.Start) {
+				bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
+				bi.offsetStart = b.restartOffset(bi.riStart)
+				bi.offsetRealStart = bi.prevOffset
+			} else {
+				// No entry >= Start: the range is empty.
+				bi.riStart = b.restartsLen
+				bi.offsetStart = b.restartsOffset
+				bi.offsetRealStart = b.restartsOffset
+			}
+		}
+		if slice.Limit != nil {
+			if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
+				bi.offsetLimit = bi.prevOffset
+				bi.riLimit = bi.restartIndex + 1
+			}
+		}
+		bi.reset()
+		if bi.offsetStart > bi.offsetLimit {
+			bi.sErr(errors.New("leveldb/table: invalid slice range"))
+		}
+	}
+	return bi
+}
+
+// getDataIter reads the data block at dataBH and returns an iterator
+// over it; read failures yield an empty iterator carrying the error.
+func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+	b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache)
+	if err == nil {
+		return r.newBlockIter(b, rel, slice, false)
+	}
+	return iterator.NewEmptyIterator(err)
+}
+
+// getDataIterErr is getDataIter guarded by the reader's sticky error,
+// taken under the read lock.
+func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	if err := r.err; err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+	return r.getDataIter(dataBH, slice, verifyChecksum, fillCache)
+}
+
+// NewIterator creates an iterator from the table.
+//
+// Slice allows slicing the iterator to only contains keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// table. And a nil Range.Limit is treated as a key after all keys in
+// the table.
+//
+// The returned iterator is not goroutine-safe and should be released
+// when not used.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	if r.err != nil {
+		return iterator.NewEmptyIterator(r.err)
+	}
+
+	fillCache := !ro.GetDontFillCache()
+	indexBlock, rel, err := r.getIndexBlock(fillCache)
+	if err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+	index := &indexIter{
+		blockIter: r.newBlockIter(indexBlock, rel, slice, true),
+		tr:        r,
+		slice:     slice,
+		// Reuse fillCache rather than re-deriving it from ro, so the
+		// index and data blocks always follow the same cache policy.
+		fillCache: fillCache,
+	}
+	return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader))
+}
+
+// find locates the first entry whose key is >= the given key. When
+// filtered is true the table's filter data (if any) is consulted first,
+// allowing a definite miss to return ErrNotFound without reading the
+// data block. When noValue is true the value is not returned.
+func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	if r.err != nil {
+		err = r.err
+		return
+	}
+
+	indexBlock, rel, err := r.getIndexBlock(true)
+	if err != nil {
+		return
+	}
+	defer rel.Release()
+
+	index := r.newBlockIter(indexBlock, nil, nil, true)
+	defer index.Release()
+	if !index.Seek(key) {
+		err = index.Error()
+		if err == nil {
+			err = ErrNotFound
+		}
+		return
+	}
+	dataBH, n := decodeBlockHandle(index.Value())
+	if n == 0 {
+		// NOTE(review): r.err is written here while only the read lock
+		// is held — confirm this is safe against concurrent readers.
+		r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+		return
+	}
+	if filtered && r.filter != nil {
+		filterBlock, frel, ferr := r.getFilterBlock(true)
+		if ferr == nil {
+			if !filterBlock.contains(r.filter, dataBH.offset, key) {
+				frel.Release()
+				return nil, nil, ErrNotFound
+			}
+			frel.Release()
+		} else if !errors.IsCorrupted(ferr) {
+			// Corrupted filter blocks are ignored (the lookup simply
+			// proceeds unfiltered); other errors abort the lookup.
+			err = ferr
+			return
+		}
+	}
+	data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
+	defer data.Release()
+	if !data.Seek(key) {
+		err = data.Error()
+		if err == nil {
+			err = ErrNotFound
+		}
+		return
+	}
+	// Don't use block buffer, no need to copy the buffer.
+	rkey = data.Key()
+	if !noValue {
+		if r.bpool == nil {
+			value = data.Value()
+		} else {
+			// Use block buffer, and since the buffer will be recycled, the buffer
+			// need to be copied.
+			value = append([]byte{}, data.Value()...)
+		}
+	}
+	return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such pair doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+	// Thin wrapper over find with the value included.
+	return r.find(key, filtered, ro, false)
+}
+
+// FindKey finds key that is greater than or equal to the given key.
+// It returns ErrNotFound if the table doesn't contain such key.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such key doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) {
+	// noValue == true: skip copying the value out of the block buffer.
+	rkey, _, err = r.find(key, filtered, ro, true)
+	return
+}
+
+// Get gets the value for the given key. It returns errors.ErrNotFound
+// if the table does not contain the key.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	if r.err != nil {
+		err = r.err
+		return
+	}
+
+	// find returns the first key >= the target; Get only succeeds on an
+	// exact match.
+	rkey, value, err := r.find(key, false, ro, false)
+	if err == nil && r.cmp.Compare(rkey, key) != 0 {
+		value = nil
+		err = ErrNotFound
+	}
+	return
+}
+
+// OffsetOf returns approximate offset for the given key.
+//
+// It is safe to modify the contents of the argument after Get returns.
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	if r.err != nil {
+		err = r.err
+		return
+	}
+
+	indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
+	if err != nil {
+		return
+	}
+	defer rel.Release()
+
+	index := r.newBlockIter(indexBlock, nil, nil, true)
+	defer index.Release()
+	if index.Seek(key) {
+		dataBH, n := decodeBlockHandle(index.Value())
+		if n == 0 {
+			// NOTE(review): r.err is written here while only the read
+			// lock is held — confirm this is safe under concurrency.
+			r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+			return
+		}
+		offset = int64(dataBH.offset)
+		return
+	}
+	err = index.Error()
+	if err == nil {
+		// Key is past every data block: report the end of the data area.
+		offset = r.dataEnd
+	}
+	return
+}
+
+// Release implements util.Releaser.
+// It also close the file if it is an io.Closer.
+// After Release all operations on the reader fail with
+// ErrReaderReleased.
+func (r *Reader) Release() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if closer, ok := r.reader.(io.Closer); ok {
+		closer.Close()
+	}
+	// Return locally cached blocks (held only when no shared cache).
+	if r.indexBlock != nil {
+		r.indexBlock.Release()
+		r.indexBlock = nil
+	}
+	if r.filterBlock != nil {
+		r.filterBlock.Release()
+		r.filterBlock = nil
+	}
+	r.reader = nil
+	r.cache = nil
+	r.bpool = nil
+	r.err = ErrReaderReleased
+}
+
+// NewReader creates a new initialized table reader for the file.
+// The fi, cache and bpool is optional and can be nil.
+//
+// Corruption detected during initialization is recorded in the returned
+// reader's sticky error rather than failing the call; I/O errors fail
+// the call directly.
+//
+// The returned table reader instance is goroutine-safe.
+func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
+	if f == nil {
+		return nil, errors.New("leveldb/table: nil file")
+	}
+
+	r := &Reader{
+		fi:             fi,
+		reader:         f,
+		cache:          cache,
+		bpool:          bpool,
+		o:              o,
+		cmp:            o.GetComparer(),
+		verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
+	}
+
+	if size < footerLen {
+		r.err = r.newErrCorrupted(0, size, "table", "too small")
+		return r, nil
+	}
+
+	// Read the fixed-size footer at the end of the file and check its
+	// magic number.
+	footerPos := size - footerLen
+	var footer [footerLen]byte
+	if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
+		return nil, err
+	}
+	if string(footer[footerLen-len(magic):footerLen]) != magic {
+		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
+		return r, nil
+	}
+
+	var n int
+	// Decode the metaindex block handle.
+	r.metaBH, n = decodeBlockHandle(footer[:])
+	if n == 0 {
+		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
+		return r, nil
+	}
+
+	// Decode the index block handle.
+	r.indexBH, n = decodeBlockHandle(footer[n:])
+	if n == 0 {
+		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
+		return r, nil
+	}
+
+	// Read metaindex block.
+	metaBlock, err := r.readBlock(r.metaBH, true)
+	if err != nil {
+		if errors.IsCorrupted(err) {
+			r.err = err
+			return r, nil
+		} else {
+			return nil, err
+		}
+	}
+
+	// Set data end.
+	r.dataEnd = int64(r.metaBH.offset)
+
+	// Read metaindex: look for a "filter.<name>" entry matching one of
+	// the configured filters.
+	metaIter := r.newBlockIter(metaBlock, nil, nil, true)
+	for metaIter.Next() {
+		key := string(metaIter.Key())
+		if !strings.HasPrefix(key, "filter.") {
+			continue
+		}
+		fn := key[7:]
+		if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
+			r.filter = f0
+		} else {
+			for _, f0 := range o.GetAltFilters() {
+				if f0.Name() == fn {
+					r.filter = f0
+					break
+				}
+			}
+		}
+		if r.filter != nil {
+			filterBH, n := decodeBlockHandle(metaIter.Value())
+			if n == 0 {
+				continue
+			}
+			r.filterBH = filterBH
+			// Update data end.
+			r.dataEnd = int64(filterBH.offset)
+			break
+		}
+	}
+	metaIter.Release()
+	metaBlock.Release()
+
+	// Cache index and filter block locally, since we don't have global cache.
+	if cache == nil {
+		r.indexBlock, err = r.readBlock(r.indexBH, true)
+		if err != nil {
+			if errors.IsCorrupted(err) {
+				r.err = err
+				return r, nil
+			} else {
+				return nil, err
+			}
+		}
+		if r.filter != nil {
+			r.filterBlock, err = r.readFilterBlock(r.filterBH)
+			if err != nil {
+				if !errors.IsCorrupted(err) {
+					return nil, err
+				}
+
+				// Don't use filter then.
+				r.filter = nil
+			}
+		}
+	}
+
+	return r, nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
new file mode 100644
index 0000000..beacdc1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
@@ -0,0 +1,177 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package table allows read and write sorted key/value.
+package table
+
+import (
+	"encoding/binary"
+)
+
+/*
+Table:
+
+A table consists of one or more data blocks, an optional filter block,
+a metaindex block, an index block and a table footer. The metaindex block
+is a special block used to keep parameters of the table, such as the filter
+block name and its block handle. The index block is a special block used to
+keep a record of data block offsets and lengths; the index block uses one as
+its restart interval. The keys used by the index block are the last key of the
+preceding block, a shorter separator of adjacent blocks, or a shorter successor
+of the last key of the last block. The filter block is an optional block that
+contains a sequence of filter data generated by a filter generator.
+
+Table data structure:
+                                                         + optional
+                                                        /
+    +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+
+    | data block 1 |      ...     | data block n | filter block | metaindex block | index block | footer |
+    +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
+
+    Each block is followed by a 5-byte trailer that contains the compression type and checksum.
+
+Table block trailer:
+
+    +---------------------------+-------------------+
+    | compression type (1-byte) | checksum (4-byte) |
+    +---------------------------+-------------------+
+
+    The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression
+    type also included in the checksum.
+
+Table footer:
+
+      +------------------- 40-bytes -------------------+
+     /                                                  \
+    +------------------------+--------------------+------+-----------------+
+    | metaindex block handle / index block handle / ---- | magic (8-bytes) |
+    +------------------------+--------------------+------+-----------------+
+
+    The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/".
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+/*
+Block:
+
+A block consists of one or more key/value entries and a block trailer.
+A block entry shares its key prefix with the preceding key until a restart
+point is reached. A block should contain at least one restart point.
+The first restart point is always zero.
+
+Block data structure:
+
+      + restart point                 + restart point (depends on restart interval)
+     /                               /
+    +---------------+---------------+---------------+---------------+---------+
+    | block entry 1 | block entry 2 |      ...      | block entry n | trailer |
+    +---------------+---------------+---------------+---------------+---------+
+
+Key/value entry:
+
+              +---- key len ----+
+             /                   \
+    +-------+---------+-----------+---------+--------------------+--------------+----------------+
+    | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) |
+    +-----------------+---------------------+--------------------+--------------+----------------+
+
+    Block entry shares key prefix with its preceding key:
+    Conditions:
+        restart_interval=2
+        entry one  : key=deck,value=v1
+        entry two  : key=dock,value=v2
+        entry three: key=duck,value=v3
+    The entries will be encoded as follow:
+
+      + restart point (offset=0)                                                 + restart point (offset=16)
+     /                                                                          /
+    +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+    |  0  |  4  |  2  |  "deck"  |  "v1"  |  1  |  3  |  2  |  "ock"  |  "v2"  |  0  |  4  |  2  |  "duck"  |  "v3"  |
+    +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+     \                                   / \                                  / \                                   /
+      +----------- entry one -----------+   +----------- entry two ----------+   +---------- entry three ----------+
+
+    The block trailer will contains two restart points:
+
+    +------------+-----------+--------+
+    |     0      |    16     |   2    |
+    +------------+-----------+---+----+
+     \                      /     \
+      +-- restart points --+       + restart points length
+
+Block trailer:
+
+      +-- 4-bytes --+
+     /               \
+    +-----------------+-----------------+-----------------+------------------------------+
+    | restart point 1 |       ....      | restart point n | restart points len (4-bytes) |
+    +-----------------+-----------------+-----------------+------------------------------+
+
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+/*
+Filter block:
+
+A filter block consists of one or more filter data entries and a filter block trailer.
+The trailer contains the filter data offsets, a trailer offset and a 1-byte base Lg.
+
+Filter block data structure:
+
+      + offset 1      + offset 2      + offset n      + trailer offset
+     /               /               /               /
+    +---------------+---------------+---------------+---------+
+    | filter data 1 |      ...      | filter data n | trailer |
+    +---------------+---------------+---------------+---------+
+
+Filter block trailer:
+
+      +- 4-bytes -+
+     /             \
+    +---------------+---------------+---------------+-------------------------------+------------------+
+    | data 1 offset |      ....     | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
+    +-------------- +---------------+---------------+-------------------------------+------------------+
+
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+const (
+	// Each block is followed by a 1-byte compression type and a 4-byte
+	// checksum.
+	blockTrailerLen = 5
+	// Fixed-size table footer: two padded block handles plus the magic.
+	footerLen       = 48
+
+	// First 64 bits of the SHA-1 of "http://code.google.com/p/leveldb/".
+	magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"
+
+	// The block type gives the per-block compression format.
+	// These constants are part of the file format and should not be changed.
+	blockTypeNoCompression     = 0
+	blockTypeSnappyCompression = 1
+
+	// Generate new filter every 2KB of data
+	filterBaseLg = 11
+	filterBase   = 1 << filterBaseLg
+)
+
+// blockHandle locates a block within the table file by its byte offset
+// and length (the length excludes the block trailer).
+type blockHandle struct {
+	offset, length uint64
+}
+
+// decodeBlockHandle decodes a block handle (offset then length, both
+// uvarint-encoded) from src. It returns the handle and the number of
+// bytes consumed, or a zero handle and 0 if src is malformed.
+func decodeBlockHandle(src []byte) (blockHandle, int) {
+	offset, n := binary.Uvarint(src)
+	if n <= 0 {
+		// n == 0: buffer too small; n < 0: value overflows 64 bits.
+		// Bail out before slicing src[n:], which would panic on n < 0.
+		return blockHandle{}, 0
+	}
+	length, m := binary.Uvarint(src[n:])
+	if m <= 0 {
+		return blockHandle{}, 0
+	}
+	return blockHandle{offset, length}, n + m
+}
+
+// encodeBlockHandle writes b into dst as two uvarints (offset then
+// length) and returns the number of bytes written. dst must be large
+// enough to hold both varints.
+func encodeBlockHandle(dst []byte, b blockHandle) int {
+	n := binary.PutUvarint(dst, b.offset)
+	m := binary.PutUvarint(dst[n:], b.length)
+	return n + m
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
new file mode 100644
index 0000000..2f52871
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
@@ -0,0 +1,11 @@
+package table
+
+import (
+	"testing"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+// TestTable is the entry point that runs the package's testutil-driven
+// test suite under `go test`.
+func TestTable(t *testing.T) {
+	testutil.RunSuite(t, "Table Suite")
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
new file mode 100644
index 0000000..7ba02e1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
@@ -0,0 +1,122 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+	"bytes"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// tableWrapper adapts *Reader to the interface expected by the shared
+// testutil key/value test harness.
+type tableWrapper struct {
+	*Reader
+}
+
+// TestFind exposes Reader.Find to the harness (unfiltered, default
+// read options).
+func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) {
+	return t.Reader.Find(key, false, nil)
+}
+
+// TestGet exposes Reader.Get to the harness with default read options.
+func (t tableWrapper) TestGet(key []byte) (value []byte, err error) {
+	return t.Reader.Get(key, nil)
+}
+
+// TestNewIterator exposes Reader.NewIterator to the harness with default
+// read options.
+func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator {
+	return t.Reader.NewIterator(slice, nil)
+}
+
+// Table read tests: approximate-offset checks on a hand-built table,
+// plus the shared key/value test harness over generated tables.
+var _ = testutil.Defer(func() {
+	Describe("Table", func() {
+		Describe("approximate offset test", func() {
+			var (
+				buf = &bytes.Buffer{}
+				o   = &opt.Options{
+					BlockSize:   1024,
+					Compression: opt.NoCompression,
+				}
+			)
+
+			// Building the table.
+			tw := NewWriter(buf, o)
+			tw.Append([]byte("k01"), []byte("hello"))
+			tw.Append([]byte("k02"), []byte("hello2"))
+			tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000))
+			tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000))
+			tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000))
+			tw.Append([]byte("k06"), []byte("hello3"))
+			tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000))
+			err := tw.Close()
+
+			It("Should be able to approximate offset of a key correctly", func() {
+				Expect(err).ShouldNot(HaveOccurred())
+
+				tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
+				Expect(err).ShouldNot(HaveOccurred())
+				CheckOffset := func(key string, expect, threshold int) {
+					offset, err := tr.OffsetOf([]byte(key))
+					Expect(err).ShouldNot(HaveOccurred())
+					Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key)
+				}
+
+				CheckOffset("k0", 0, 0)
+				CheckOffset("k01a", 0, 0)
+				CheckOffset("k02", 0, 0)
+				CheckOffset("k03", 0, 0)
+				CheckOffset("k04", 10000, 1000)
+				CheckOffset("k04a", 210000, 1000)
+				CheckOffset("k05", 210000, 1000)
+				CheckOffset("k06", 510000, 1000)
+				CheckOffset("k07", 510000, 1000)
+				CheckOffset("xyz", 610000, 2000)
+			})
+		})
+
+		Describe("read test", func() {
+			// Build serializes kv into a fresh table and reopens it for
+			// reading via the harness wrapper.
+			Build := func(kv testutil.KeyValue) testutil.DB {
+				o := &opt.Options{
+					BlockSize:            512,
+					BlockRestartInterval: 3,
+				}
+				buf := &bytes.Buffer{}
+
+				// Building the table.
+				tw := NewWriter(buf, o)
+				kv.Iterate(func(i int, key, value []byte) {
+					tw.Append(key, value)
+				})
+				tw.Close()
+
+				// Opening the table.
+				tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
+				return tableWrapper{tr}
+			}
+			Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {
+				return func() {
+					db := Build(*kv)
+					if body != nil {
+						body(db.(tableWrapper).Reader)
+					}
+					testutil.KeyValueTesting(nil, *kv, db, nil, nil)
+				}
+			}
+
+			testutil.AllKeyValueTesting(nil, Build, nil, nil)
+			Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) {
+				It("should have correct blocks number", func() {
+					indexBlock, err := r.readBlock(r.indexBH, true)
+					Expect(err).To(BeNil())
+					Expect(indexBlock.restartsLen).Should(Equal(9))
+				})
+			}))
+		})
+	})
+})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
new file mode 100644
index 0000000..4fbbf00
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
@@ -0,0 +1,374 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/golang/snappy"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func sharedPrefixLen(a, b []byte) int {
+	i, n := 0, len(a)
+	if n > len(b) {
+		n = len(b)
+	}
+	for i < n && a[i] == b[i] {
+		i++
+	}
+	return i
+}
+
+type blockWriter struct {
+	restartInterval int
+	buf             util.Buffer
+	nEntries        int
+	prevKey         []byte
+	restarts        []uint32
+	scratch         []byte
+}
+
+func (w *blockWriter) append(key, value []byte) {
+	nShared := 0
+	if w.nEntries%w.restartInterval == 0 {
+		w.restarts = append(w.restarts, uint32(w.buf.Len()))
+	} else {
+		nShared = sharedPrefixLen(w.prevKey, key)
+	}
+	n := binary.PutUvarint(w.scratch[0:], uint64(nShared))
+	n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared))
+	n += binary.PutUvarint(w.scratch[n:], uint64(len(value)))
+	w.buf.Write(w.scratch[:n])
+	w.buf.Write(key[nShared:])
+	w.buf.Write(value)
+	w.prevKey = append(w.prevKey[:0], key...)
+	w.nEntries++
+}
+
+func (w *blockWriter) finish() {
+	// Write restarts entry.
+	if w.nEntries == 0 {
+		// Must have at least one restart entry.
+		w.restarts = append(w.restarts, 0)
+	}
+	w.restarts = append(w.restarts, uint32(len(w.restarts)))
+	for _, x := range w.restarts {
+		buf4 := w.buf.Alloc(4)
+		binary.LittleEndian.PutUint32(buf4, x)
+	}
+}
+
+func (w *blockWriter) reset() {
+	w.buf.Reset()
+	w.nEntries = 0
+	w.restarts = w.restarts[:0]
+}
+
+func (w *blockWriter) bytesLen() int {
+	restartsLen := len(w.restarts)
+	if restartsLen == 0 {
+		restartsLen = 1
+	}
+	return w.buf.Len() + 4*restartsLen + 4
+}
+
+type filterWriter struct {
+	generator filter.FilterGenerator
+	buf       util.Buffer
+	nKeys     int
+	offsets   []uint32
+}
+
+func (w *filterWriter) add(key []byte) {
+	if w.generator == nil {
+		return
+	}
+	w.generator.Add(key)
+	w.nKeys++
+}
+
+func (w *filterWriter) flush(offset uint64) {
+	if w.generator == nil {
+		return
+	}
+	for x := int(offset / filterBase); x > len(w.offsets); {
+		w.generate()
+	}
+}
+
+func (w *filterWriter) finish() {
+	if w.generator == nil {
+		return
+	}
+	// Generate last keys.
+
+	if w.nKeys > 0 {
+		w.generate()
+	}
+	w.offsets = append(w.offsets, uint32(w.buf.Len()))
+	for _, x := range w.offsets {
+		buf4 := w.buf.Alloc(4)
+		binary.LittleEndian.PutUint32(buf4, x)
+	}
+	w.buf.WriteByte(filterBaseLg)
+}
+
+func (w *filterWriter) generate() {
+	// Record offset.
+	w.offsets = append(w.offsets, uint32(w.buf.Len()))
+	// Generate filters.
+	if w.nKeys > 0 {
+		w.generator.Generate(&w.buf)
+		w.nKeys = 0
+	}
+}
+
+// Writer is a table writer.
+type Writer struct {
+	writer io.Writer
+	err    error
+	// Options
+	cmp         comparer.Comparer
+	filter      filter.Filter
+	compression opt.Compression
+	blockSize   int
+
+	dataBlock   blockWriter
+	indexBlock  blockWriter
+	filterBlock filterWriter
+	pendingBH   blockHandle
+	offset      uint64
+	nEntries    int
+	// Scratch allocated enough for 5 uvarint. Block writer should not use
+	// first 20-bytes since it will be used to encode block handle, which
+	// then passed to the block writer itself.
+	scratch            [50]byte
+	comparerScratch    []byte
+	compressionScratch []byte
+}
+
+func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
+	// Compress the buffer if necessary.
+	var b []byte
+	if compression == opt.SnappyCompression {
+		// Allocate scratch enough for compression and block trailer.
+		if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
+			w.compressionScratch = make([]byte, n)
+		}
+		compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
+		n := len(compressed)
+		b = compressed[:n+blockTrailerLen]
+		b[n] = blockTypeSnappyCompression
+	} else {
+		tmp := buf.Alloc(blockTrailerLen)
+		tmp[0] = blockTypeNoCompression
+		b = buf.Bytes()
+	}
+
+	// Calculate the checksum.
+	n := len(b) - 4
+	checksum := util.NewCRC(b[:n]).Value()
+	binary.LittleEndian.PutUint32(b[n:], checksum)
+
+	// Write the buffer to the file.
+	_, err = w.writer.Write(b)
+	if err != nil {
+		return
+	}
+	bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
+	w.offset += uint64(len(b))
+	return
+}
+
+func (w *Writer) flushPendingBH(key []byte) {
+	if w.pendingBH.length == 0 {
+		return
+	}
+	var separator []byte
+	if len(key) == 0 {
+		separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey)
+	} else {
+		separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key)
+	}
+	if separator == nil {
+		separator = w.dataBlock.prevKey
+	} else {
+		w.comparerScratch = separator
+	}
+	n := encodeBlockHandle(w.scratch[:20], w.pendingBH)
+	// Append the block handle to the index block.
+	w.indexBlock.append(separator, w.scratch[:n])
+	// Reset prev key of the data block.
+	w.dataBlock.prevKey = w.dataBlock.prevKey[:0]
+	// Clear pending block handle.
+	w.pendingBH = blockHandle{}
+}
+
+func (w *Writer) finishBlock() error {
+	w.dataBlock.finish()
+	bh, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+	if err != nil {
+		return err
+	}
+	w.pendingBH = bh
+	// Reset the data block.
+	w.dataBlock.reset()
+	// Flush the filter block.
+	w.filterBlock.flush(w.offset)
+	return nil
+}
+
+// Append appends key/value pair to the table. The keys passed must
+// be in increasing order.
+//
+// It is safe to modify the contents of the arguments after Append returns.
+func (w *Writer) Append(key, value []byte) error {
+	if w.err != nil {
+		return w.err
+	}
+	if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 {
+		w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key)
+		return w.err
+	}
+
+	w.flushPendingBH(key)
+	// Append key/value pair to the data block.
+	w.dataBlock.append(key, value)
+	// Add key to the filter block.
+	w.filterBlock.add(key)
+
+	// Finish the data block if block size target reached.
+	if w.dataBlock.bytesLen() >= w.blockSize {
+		if err := w.finishBlock(); err != nil {
+			w.err = err
+			return w.err
+		}
+	}
+	w.nEntries++
+	return nil
+}
+
+// BlocksLen returns number of blocks written so far.
+func (w *Writer) BlocksLen() int {
+	n := w.indexBlock.nEntries
+	if w.pendingBH.length > 0 {
+		// Includes the pending block.
+		n++
+	}
+	return n
+}
+
+// EntriesLen returns number of entries added so far.
+func (w *Writer) EntriesLen() int {
+	return w.nEntries
+}
+
+// BytesLen returns number of bytes written so far.
+func (w *Writer) BytesLen() int {
+	return int(w.offset)
+}
+
+// Close will finalize the table. Calling Append is not possible
+// after Close, but calling BlocksLen, EntriesLen and BytesLen
+// is still possible.
+func (w *Writer) Close() error {
+	if w.err != nil {
+		return w.err
+	}
+
+	// Write the last data block. Or empty data block if there
+	// aren't any data blocks at all.
+	if w.dataBlock.nEntries > 0 || w.nEntries == 0 {
+		if err := w.finishBlock(); err != nil {
+			w.err = err
+			return w.err
+		}
+	}
+	w.flushPendingBH(nil)
+
+	// Write the filter block.
+	var filterBH blockHandle
+	w.filterBlock.finish()
+	if buf := &w.filterBlock.buf; buf.Len() > 0 {
+		filterBH, w.err = w.writeBlock(buf, opt.NoCompression)
+		if w.err != nil {
+			return w.err
+		}
+	}
+
+	// Write the metaindex block.
+	if filterBH.length > 0 {
+		key := []byte("filter." + w.filter.Name())
+		n := encodeBlockHandle(w.scratch[:20], filterBH)
+		w.dataBlock.append(key, w.scratch[:n])
+	}
+	w.dataBlock.finish()
+	metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+	if err != nil {
+		w.err = err
+		return w.err
+	}
+
+	// Write the index block.
+	w.indexBlock.finish()
+	indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression)
+	if err != nil {
+		w.err = err
+		return w.err
+	}
+
+	// Write the table footer.
+	footer := w.scratch[:footerLen]
+	for i := range footer {
+		footer[i] = 0
+	}
+	n := encodeBlockHandle(footer, metaindexBH)
+	encodeBlockHandle(footer[n:], indexBH)
+	copy(footer[footerLen-len(magic):], magic)
+	if _, err := w.writer.Write(footer); err != nil {
+		w.err = err
+		return w.err
+	}
+	w.offset += footerLen
+
+	w.err = errors.New("leveldb/table: writer is closed")
+	return nil
+}
+
+// NewWriter creates a new initialized table writer for the file.
+//
+// Table writer is not goroutine-safe.
+func NewWriter(f io.Writer, o *opt.Options) *Writer {
+	w := &Writer{
+		writer:          f,
+		cmp:             o.GetComparer(),
+		filter:          o.GetFilter(),
+		compression:     o.GetCompression(),
+		blockSize:       o.GetBlockSize(),
+		comparerScratch: make([]byte, 0),
+	}
+	// data block
+	w.dataBlock.restartInterval = o.GetBlockRestartInterval()
+	// The first 20-bytes are used for encoding block handle.
+	w.dataBlock.scratch = w.scratch[20:]
+	// index block
+	w.indexBlock.restartInterval = 1
+	w.indexBlock.scratch = w.scratch[20:]
+	// filter block
+	if w.filter != nil {
+		w.filterBlock.generator = w.filter.NewGenerator()
+		w.filterBlock.flush(0)
+	}
+	return w
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
new file mode 100644
index 0000000..de6be34
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
@@ -0,0 +1,222 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package testutil
+
+import (
+	"fmt"
+	"math/rand"
+
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type DB interface{}
+
+type Put interface {
+	TestPut(key []byte, value []byte) error
+}
+
+type Delete interface {
+	TestDelete(key []byte) error
+}
+
+type Find interface {
+	TestFind(key []byte) (rkey, rvalue []byte, err error)
+}
+
+type Get interface {
+	TestGet(key []byte) (value []byte, err error)
+}
+
+type Has interface {
+	TestHas(key []byte) (ret bool, err error)
+}
+
+type NewIterator interface {
+	TestNewIterator(slice *util.Range) iterator.Iterator
+}
+
+type DBAct int
+
+func (a DBAct) String() string {
+	switch a {
+	case DBNone:
+		return "none"
+	case DBPut:
+		return "put"
+	case DBOverwrite:
+		return "overwrite"
+	case DBDelete:
+		return "delete"
+	case DBDeleteNA:
+		return "delete_na"
+	}
+	return "unknown"
+}
+
+const (
+	DBNone DBAct = iota
+	DBPut
+	DBOverwrite
+	DBDelete
+	DBDeleteNA
+)
+
+type DBTesting struct {
+	Rand *rand.Rand
+	DB   interface {
+		Get
+		Put
+		Delete
+	}
+	PostFn             func(t *DBTesting)
+	Deleted, Present   KeyValue
+	Act, LastAct       DBAct
+	ActKey, LastActKey []byte
+}
+
+func (t *DBTesting) post() {
+	if t.PostFn != nil {
+		t.PostFn(t)
+	}
+}
+
+func (t *DBTesting) setAct(act DBAct, key []byte) {
+	t.LastAct, t.Act = t.Act, act
+	t.LastActKey, t.ActKey = t.ActKey, key
+}
+
+func (t *DBTesting) text() string {
+	return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey)
+}
+
+func (t *DBTesting) Text() string {
+	return "DBTesting " + t.text()
+}
+
+func (t *DBTesting) TestPresentKV(key, value []byte) {
+	rvalue, err := t.DB.TestGet(key)
+	Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text())
+	Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text())
+}
+
+func (t *DBTesting) TestAllPresent() {
+	t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) {
+		t.TestPresentKV(key, value)
+	})
+}
+
+func (t *DBTesting) TestDeletedKey(key []byte) {
+	_, err := t.DB.TestGet(key)
+	Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text())
+}
+
+func (t *DBTesting) TestAllDeleted() {
+	t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) {
+		t.TestDeletedKey(key)
+	})
+}
+
+func (t *DBTesting) TestAll() {
+	dn := t.Deleted.Len()
+	pn := t.Present.Len()
+	ShuffledIndex(t.Rand, dn+pn, 1, func(i int) {
+		if i >= dn {
+			key, value := t.Present.Index(i - dn)
+			t.TestPresentKV(key, value)
+		} else {
+			t.TestDeletedKey(t.Deleted.KeyAt(i))
+		}
+	})
+}
+
+func (t *DBTesting) Put(key, value []byte) {
+	if new := t.Present.PutU(key, value); new {
+		t.setAct(DBPut, key)
+	} else {
+		t.setAct(DBOverwrite, key)
+	}
+	t.Deleted.Delete(key)
+	err := t.DB.TestPut(key, value)
+	Expect(err).ShouldNot(HaveOccurred(), t.Text())
+	t.TestPresentKV(key, value)
+	t.post()
+}
+
+func (t *DBTesting) PutRandom() bool {
+	if t.Deleted.Len() > 0 {
+		i := t.Rand.Intn(t.Deleted.Len())
+		key, value := t.Deleted.Index(i)
+		t.Put(key, value)
+		return true
+	}
+	return false
+}
+
+func (t *DBTesting) Delete(key []byte) {
+	if exist, value := t.Present.Delete(key); exist {
+		t.setAct(DBDelete, key)
+		t.Deleted.PutU(key, value)
+	} else {
+		t.setAct(DBDeleteNA, key)
+	}
+	err := t.DB.TestDelete(key)
+	Expect(err).ShouldNot(HaveOccurred(), t.Text())
+	t.TestDeletedKey(key)
+	t.post()
+}
+
+func (t *DBTesting) DeleteRandom() bool {
+	if t.Present.Len() > 0 {
+		i := t.Rand.Intn(t.Present.Len())
+		t.Delete(t.Present.KeyAt(i))
+		return true
+	}
+	return false
+}
+
+func (t *DBTesting) RandomAct(round int) {
+	for i := 0; i < round; i++ {
+		if t.Rand.Int()%2 == 0 {
+			t.PutRandom()
+		} else {
+			t.DeleteRandom()
+		}
+	}
+}
+
+func DoDBTesting(t *DBTesting) {
+	if t.Rand == nil {
+		t.Rand = NewRand()
+	}
+
+	t.DeleteRandom()
+	t.PutRandom()
+	t.DeleteRandom()
+	t.DeleteRandom()
+	for i := t.Deleted.Len() / 2; i >= 0; i-- {
+		t.PutRandom()
+	}
+	t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10)
+
+	// Additional iterator testing
+	if db, ok := t.DB.(NewIterator); ok {
+		iter := db.TestNewIterator(nil)
+		Expect(iter.Error()).NotTo(HaveOccurred())
+
+		it := IteratorTesting{
+			KeyValue: t.Present,
+			Iter:     iter,
+		}
+
+		DoIteratorTesting(&it)
+		iter.Release()
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
new file mode 100644
index 0000000..82f3d0e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
@@ -0,0 +1,21 @@
+package testutil
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func RunSuite(t GinkgoTestingT, name string) {
+	RunDefer()
+
+	SynchronizedBeforeSuite(func() []byte {
+		RunDefer("setup")
+		return nil
+	}, func(data []byte) {})
+	SynchronizedAfterSuite(func() {
+		RunDefer("teardown")
+	}, func() {})
+
+	RegisterFailHandler(Fail)
+	RunSpecs(t, name)
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go
new file mode 100644
index 0000000..40a1496
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go
@@ -0,0 +1,327 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package testutil
+
+import (
+	"fmt"
+	"math/rand"
+
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+)
+
+type IterAct int
+
+func (a IterAct) String() string {
+	switch a {
+	case IterNone:
+		return "none"
+	case IterFirst:
+		return "first"
+	case IterLast:
+		return "last"
+	case IterPrev:
+		return "prev"
+	case IterNext:
+		return "next"
+	case IterSeek:
+		return "seek"
+	case IterSOI:
+		return "soi"
+	case IterEOI:
+		return "eoi"
+	}
+	return "unknown"
+}
+
+const (
+	IterNone IterAct = iota
+	IterFirst
+	IterLast
+	IterPrev
+	IterNext
+	IterSeek
+	IterSOI
+	IterEOI
+)
+
+type IteratorTesting struct {
+	KeyValue
+	Iter         iterator.Iterator
+	Rand         *rand.Rand
+	PostFn       func(t *IteratorTesting)
+	Pos          int
+	Act, LastAct IterAct
+
+	once bool
+}
+
+func (t *IteratorTesting) init() {
+	if !t.once {
+		t.Pos = -1
+		t.once = true
+	}
+}
+
+func (t *IteratorTesting) post() {
+	if t.PostFn != nil {
+		t.PostFn(t)
+	}
+}
+
+func (t *IteratorTesting) setAct(act IterAct) {
+	t.LastAct, t.Act = t.Act, act
+}
+
+func (t *IteratorTesting) text() string {
+	return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act)
+}
+
+func (t *IteratorTesting) Text() string {
+	return "IteratorTesting is " + t.text()
+}
+
+func (t *IteratorTesting) IsFirst() bool {
+	t.init()
+	return t.Len() > 0 && t.Pos == 0
+}
+
+func (t *IteratorTesting) IsLast() bool {
+	t.init()
+	return t.Len() > 0 && t.Pos == t.Len()-1
+}
+
+func (t *IteratorTesting) TestKV() {
+	t.init()
+	key, value := t.Index(t.Pos)
+	Expect(t.Iter.Key()).NotTo(BeNil())
+	Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text())
+	Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text())
+}
+
+func (t *IteratorTesting) First() {
+	t.init()
+	t.setAct(IterFirst)
+
+	ok := t.Iter.First()
+	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
+	if t.Len() > 0 {
+		t.Pos = 0
+		Expect(ok).Should(BeTrue(), t.Text())
+		t.TestKV()
+	} else {
+		t.Pos = -1
+		Expect(ok).ShouldNot(BeTrue(), t.Text())
+	}
+	t.post()
+}
+
+func (t *IteratorTesting) Last() {
+	t.init()
+	t.setAct(IterLast)
+
+	ok := t.Iter.Last()
+	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
+	if t.Len() > 0 {
+		t.Pos = t.Len() - 1
+		Expect(ok).Should(BeTrue(), t.Text())
+		t.TestKV()
+	} else {
+		t.Pos = 0
+		Expect(ok).ShouldNot(BeTrue(), t.Text())
+	}
+	t.post()
+}
+
+func (t *IteratorTesting) Next() {
+	t.init()
+	t.setAct(IterNext)
+
+	ok := t.Iter.Next()
+	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
+	if t.Pos < t.Len()-1 {
+		t.Pos++
+		Expect(ok).Should(BeTrue(), t.Text())
+		t.TestKV()
+	} else {
+		t.Pos = t.Len()
+		Expect(ok).ShouldNot(BeTrue(), t.Text())
+	}
+	t.post()
+}
+
+func (t *IteratorTesting) Prev() {
+	t.init()
+	t.setAct(IterPrev)
+
+	ok := t.Iter.Prev()
+	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
+	if t.Pos > 0 {
+		t.Pos--
+		Expect(ok).Should(BeTrue(), t.Text())
+		t.TestKV()
+	} else {
+		t.Pos = -1
+		Expect(ok).ShouldNot(BeTrue(), t.Text())
+	}
+	t.post()
+}
+
+func (t *IteratorTesting) Seek(i int) {
+	t.init()
+	t.setAct(IterSeek)
+
+	key, _ := t.Index(i)
+	oldKey, _ := t.IndexOrNil(t.Pos)
+
+	ok := t.Iter.Seek(key)
+	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
+	Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text()))
+
+	t.Pos = i
+	t.TestKV()
+	t.post()
+}
+
+func (t *IteratorTesting) SeekInexact(i int) {
+	t.init()
+	t.setAct(IterSeek)
+	var key0 []byte
+	key1, _ := t.Index(i)
+	if i > 0 {
+		key0, _ = t.Index(i - 1)
+	}
+	key := BytesSeparator(key0, key1)
+	oldKey, _ := t.IndexOrNil(t.Pos)
+
+	ok := t.Iter.Seek(key)
+	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
+	Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text()))
+
+	t.Pos = i
+	t.TestKV()
+	t.post()
+}
+
+func (t *IteratorTesting) SeekKey(key []byte) {
+	t.init()
+	t.setAct(IterSeek)
+	oldKey, _ := t.IndexOrNil(t.Pos)
+	i := t.Search(key)
+
+	ok := t.Iter.Seek(key)
+	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
+	if i < t.Len() {
+		key_, _ := t.Index(i)
+		Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text()))
+		t.Pos = i
+		t.TestKV()
+	} else {
+		Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text()))
+	}
+
+	t.Pos = i
+	t.post()
+}
+
+func (t *IteratorTesting) SOI() {
+	t.init()
+	t.setAct(IterSOI)
+	Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text())
+	for i := 0; i < 3; i++ {
+		t.Prev()
+	}
+	t.post()
+}
+
+func (t *IteratorTesting) EOI() {
+	t.init()
+	t.setAct(IterEOI)
+	Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text())
+	for i := 0; i < 3; i++ {
+		t.Next()
+	}
+	t.post()
+}
+
+func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) {
+	t.init()
+	for old := t.Pos; t.Pos > 0; old = t.Pos {
+		fn(t)
+		Expect(t.Pos).Should(BeNumerically("<", old), t.Text())
+	}
+}
+
+func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) {
+	t.init()
+	for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos {
+		fn(t)
+		Expect(t.Pos).Should(BeNumerically(">", old), t.Text())
+	}
+}
+
+func (t *IteratorTesting) PrevAll() {
+	t.WalkPrev(func(t *IteratorTesting) {
+		t.Prev()
+	})
+}
+
+func (t *IteratorTesting) NextAll() {
+	t.WalkNext(func(t *IteratorTesting) {
+		t.Next()
+	})
+}
+
+func DoIteratorTesting(t *IteratorTesting) {
+	if t.Rand == nil {
+		t.Rand = NewRand()
+	}
+	t.SOI()
+	t.NextAll()
+	t.First()
+	t.SOI()
+	t.NextAll()
+	t.EOI()
+	t.PrevAll()
+	t.Last()
+	t.EOI()
+	t.PrevAll()
+	t.SOI()
+
+	t.NextAll()
+	t.PrevAll()
+	t.NextAll()
+	t.Last()
+	t.PrevAll()
+	t.First()
+	t.NextAll()
+	t.EOI()
+
+	ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
+		t.Seek(i)
+	})
+
+	ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
+		t.SeekInexact(i)
+	})
+
+	ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
+		t.Seek(i)
+		if i%2 != 0 {
+			t.PrevAll()
+			t.SOI()
+		} else {
+			t.NextAll()
+			t.EOI()
+		}
+	})
+
+	for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} {
+		t.SeekKey([]byte(key))
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go
new file mode 100644
index 0000000..3304f98
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go
@@ -0,0 +1,352 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package testutil
+
+import (
+	"fmt"
+	"math/rand"
+	"sort"
+	"strings"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type KeyValueEntry struct {
+	key, value []byte
+}
+
+type KeyValue struct {
+	entries []KeyValueEntry
+	nbytes  int
+}
+
+func (kv *KeyValue) Put(key, value []byte) {
+	if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 {
+		panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key))
+	}
+	kv.entries = append(kv.entries, KeyValueEntry{key, value})
+	kv.nbytes += len(key) + len(value)
+}
+
+func (kv *KeyValue) PutString(key, value string) {
+	kv.Put([]byte(key), []byte(value))
+}
+
+func (kv *KeyValue) PutU(key, value []byte) bool {
+	if i, exist := kv.Get(key); !exist {
+		if i < kv.Len() {
+			kv.entries = append(kv.entries[:i+1], kv.entries[i:]...)
+			kv.entries[i] = KeyValueEntry{key, value}
+		} else {
+			kv.entries = append(kv.entries, KeyValueEntry{key, value})
+		}
+		kv.nbytes += len(key) + len(value)
+		return true
+	} else {
+		kv.nbytes += len(value) - len(kv.ValueAt(i))
+		kv.entries[i].value = value
+	}
+	return false
+}
+
+func (kv *KeyValue) PutUString(key, value string) bool {
+	return kv.PutU([]byte(key), []byte(value))
+}
+
+func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) {
+	i, exist := kv.Get(key)
+	if exist {
+		value = kv.entries[i].value
+		kv.DeleteIndex(i)
+	}
+	return
+}
+
+func (kv *KeyValue) DeleteIndex(i int) bool {
+	if i < kv.Len() {
+		kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i))
+		kv.entries = append(kv.entries[:i], kv.entries[i+1:]...)
+		return true
+	}
+	return false
+}
+
+func (kv KeyValue) Len() int {
+	return len(kv.entries)
+}
+
+func (kv *KeyValue) Size() int {
+	return kv.nbytes
+}
+
+func (kv KeyValue) KeyAt(i int) []byte {
+	return kv.entries[i].key
+}
+
+func (kv KeyValue) ValueAt(i int) []byte {
+	return kv.entries[i].value
+}
+
+func (kv KeyValue) Index(i int) (key, value []byte) {
+	if i < 0 || i >= len(kv.entries) {
+		panic(fmt.Sprintf("Index #%d: out of range", i))
+	}
+	return kv.entries[i].key, kv.entries[i].value
+}
+
+func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) {
+	key, value = kv.Index(i)
+	var key0 []byte
+	var key1 = kv.KeyAt(i)
+	if i > 0 {
+		key0 = kv.KeyAt(i - 1)
+	}
+	key_ = BytesSeparator(key0, key1)
+	return
+}
+
+func (kv KeyValue) IndexOrNil(i int) (key, value []byte) {
+	if i >= 0 && i < len(kv.entries) {
+		return kv.entries[i].key, kv.entries[i].value
+	}
+	return nil, nil
+}
+
+func (kv KeyValue) IndexString(i int) (key, value string) {
+	key_, _value := kv.Index(i)
+	return string(key_), string(_value)
+}
+
+func (kv KeyValue) Search(key []byte) int {
+	return sort.Search(kv.Len(), func(i int) bool {
+		return cmp.Compare(kv.KeyAt(i), key) >= 0
+	})
+}
+
+func (kv KeyValue) SearchString(key string) int {
+	return kv.Search([]byte(key))
+}
+
+func (kv KeyValue) Get(key []byte) (i int, exist bool) {
+	i = kv.Search(key)
+	if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 {
+		exist = true
+	}
+	return
+}
+
+func (kv KeyValue) GetString(key string) (i int, exist bool) {
+	return kv.Get([]byte(key))
+}
+
+func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) {
+	for i, x := range kv.entries {
+		fn(i, x.key, x.value)
+	}
+}
+
+func (kv KeyValue) IterateString(fn func(i int, key, value string)) {
+	kv.Iterate(func(i int, key, value []byte) {
+		fn(i, string(key), string(value))
+	})
+}
+
+func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) {
+	ShuffledIndex(rnd, kv.Len(), 1, func(i int) {
+		fn(i, kv.entries[i].key, kv.entries[i].value)
+	})
+}
+
+func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) {
+	kv.IterateShuffled(rnd, func(i int, key, value []byte) {
+		fn(i, string(key), string(value))
+	})
+}
+
+func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) {
+	for i := range kv.entries {
+		key_, key, value := kv.IndexInexact(i)
+		fn(i, key_, key, value)
+	}
+}
+
+func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) {
+	kv.IterateInexact(func(i int, key_, key, value []byte) {
+		fn(i, string(key_), string(key), string(value))
+	})
+}
+
+func (kv KeyValue) Clone() KeyValue {
+	return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes}
+}
+
+func (kv KeyValue) Slice(start, limit int) KeyValue {
+	if start < 0 || limit > kv.Len() {
+		panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit))
+	} else if limit < start {
+		panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit))
+	}
+	return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes}
+}
+
+func (kv KeyValue) SliceKey(start, limit []byte) KeyValue {
+	start_ := 0
+	limit_ := kv.Len()
+	if start != nil {
+		start_ = kv.Search(start)
+	}
+	if limit != nil {
+		limit_ = kv.Search(limit)
+	}
+	return kv.Slice(start_, limit_)
+}
+
+func (kv KeyValue) SliceKeyString(start, limit string) KeyValue {
+	return kv.SliceKey([]byte(start), []byte(limit))
+}
+
+func (kv KeyValue) SliceRange(r *util.Range) KeyValue {
+	if r != nil {
+		return kv.SliceKey(r.Start, r.Limit)
+	}
+	return kv.Clone()
+}
+
+func (kv KeyValue) Range(start, limit int) (r util.Range) {
+	if kv.Len() > 0 {
+		if start == kv.Len() {
+			r.Start = BytesAfter(kv.KeyAt(start - 1))
+		} else {
+			r.Start = kv.KeyAt(start)
+		}
+	}
+	if limit < kv.Len() {
+		r.Limit = kv.KeyAt(limit)
+	}
+	return
+}
+
+func KeyValue_EmptyKey() *KeyValue {
+	kv := &KeyValue{}
+	kv.PutString("", "v")
+	return kv
+}
+
+func KeyValue_EmptyValue() *KeyValue {
+	kv := &KeyValue{}
+	kv.PutString("abc", "")
+	kv.PutString("abcd", "")
+	return kv
+}
+
+func KeyValue_OneKeyValue() *KeyValue {
+	kv := &KeyValue{}
+	kv.PutString("abc", "v")
+	return kv
+}
+
+func KeyValue_BigValue() *KeyValue {
+	kv := &KeyValue{}
+	kv.PutString("big1", strings.Repeat("1", 200000))
+	return kv
+}
+
+func KeyValue_SpecialKey() *KeyValue {
+	kv := &KeyValue{}
+	kv.PutString("\xff\xff", "v3")
+	return kv
+}
+
+func KeyValue_MultipleKeyValue() *KeyValue {
+	kv := &KeyValue{}
+	kv.PutString("a", "v")
+	kv.PutString("aa", "v1")
+	kv.PutString("aaa", "v2")
+	kv.PutString("aaacccccccccc", "v2")
+	kv.PutString("aaaccccccccccd", "v3")
+	kv.PutString("aaaccccccccccf", "v4")
+	kv.PutString("aaaccccccccccfg", "v5")
+	kv.PutString("ab", "v6")
+	kv.PutString("abc", "v7")
+	kv.PutString("abcd", "v8")
+	kv.PutString("accccccccccccccc", "v9")
+	kv.PutString("b", "v10")
+	kv.PutString("bb", "v11")
+	kv.PutString("bc", "v12")
+	kv.PutString("c", "v13")
+	kv.PutString("c1", "v13")
+	kv.PutString("czzzzzzzzzzzzzz", "v14")
+	kv.PutString("fffffffffffffff", "v15")
+	kv.PutString("g11", "v15")
+	kv.PutString("g111", "v15")
+	kv.PutString("g111\xff", "v15")
+	kv.PutString("zz", "v16")
+	kv.PutString("zzzzzzz", "v16")
+	kv.PutString("zzzzzzzzzzzzzzzz", "v16")
+	return kv
+}
+
+var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy")
+
+func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue {
+	if rnd == nil {
+		rnd = NewRand()
+	}
+	if maxlen < minlen {
+		panic("max len should >= min len")
+	}
+
+	rrand := func(min, max int) int {
+		if min == max {
+			return max
+		}
+		return rnd.Intn(max-min) + min
+	}
+
+	kv := &KeyValue{}
+	endC := byte(len(keymap) - 1)
+	gen := make([]byte, 0, maxlen)
+	for i := 0; i < n; i++ {
+		m := rrand(minlen, maxlen)
+		last := gen
+	retry:
+		gen = last[:m]
+		if k := len(last); m > k {
+			for j := k; j < m; j++ {
+				gen[j] = 0
+			}
+		} else {
+			for j := m - 1; j >= 0; j-- {
+				c := last[j]
+				if c == endC {
+					continue
+				}
+				gen[j] = c + 1
+				for j += 1; j < m; j++ {
+					gen[j] = 0
+				}
+				goto ok
+			}
+			if m < maxlen {
+				m++
+				goto retry
+			}
+			panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n))
+		ok:
+		}
+		key := make([]byte, m)
+		for j := 0; j < m; j++ {
+			key[j] = keymap[gen[j]]
+		}
+		value := make([]byte, rrand(vminlen, vmaxlen))
+		for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ {
+			value[n] = 'x'
+		}
+		kv.Put(key, value)
+	}
+	return kv
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
new file mode 100644
index 0000000..f85692f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
@@ -0,0 +1,187 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package testutil
+
+import (
+	"fmt"
+	"math/rand"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) {
+	if rnd == nil {
+		rnd = NewRand()
+	}
+
+	if p == nil {
+		BeforeEach(func() {
+			p = setup(kv)
+		})
+		if teardown != nil {
+			AfterEach(func() {
+				teardown(p)
+			})
+		}
+	}
+
+	It("Should find all keys with Find", func() {
+		if db, ok := p.(Find); ok {
+			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
+				key_, key, value := kv.IndexInexact(i)
+
+				// Using exact key.
+				rkey, rvalue, err := db.TestFind(key)
+				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
+				Expect(rkey).Should(Equal(key), "Key")
+				Expect(rvalue).Should(Equal(value), "Value for key %q", key)
+
+				// Using inexact key.
+				rkey, rvalue, err = db.TestFind(key_)
+				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key)
+				Expect(rkey).Should(Equal(key))
+				Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key)
+			})
+		}
+	})
+
+	It("Should return error if the key is not present", func() {
+		if db, ok := p.(Find); ok {
+			var key []byte
+			if kv.Len() > 0 {
+				key_, _ := kv.Index(kv.Len() - 1)
+				key = BytesAfter(key_)
+			}
+			rkey, _, err := db.TestFind(key)
+			Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey)
+			Expect(err).Should(Equal(errors.ErrNotFound))
+		}
+	})
+
+	It("Should only find exact key with Get", func() {
+		if db, ok := p.(Get); ok {
+			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
+				key_, key, value := kv.IndexInexact(i)
+
+				// Using exact key.
+				rvalue, err := db.TestGet(key)
+				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
+				Expect(rvalue).Should(Equal(value), "Value for key %q", key)
+
+				// Using inexact key.
+				if len(key_) > 0 {
+					_, err = db.TestGet(key_)
+					Expect(err).Should(HaveOccurred(), "Error for key %q", key_)
+					Expect(err).Should(Equal(errors.ErrNotFound))
+				}
+			})
+		}
+	})
+
+	It("Should only find present key with Has", func() {
+		if db, ok := p.(Has); ok {
+			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
+				key_, key, _ := kv.IndexInexact(i)
+
+				// Using exact key.
+				ret, err := db.TestHas(key)
+				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
+				Expect(ret).Should(BeTrue(), "False for key %q", key)
+
+				// Using inexact key.
+				if len(key_) > 0 {
+					ret, err = db.TestHas(key_)
+					Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_)
+					Expect(ret).ShouldNot(BeTrue(), "True for key %q", key)
+				}
+			})
+		}
+	})
+
+	TestIter := func(r *util.Range, _kv KeyValue) {
+		if db, ok := p.(NewIterator); ok {
+			iter := db.TestNewIterator(r)
+			Expect(iter.Error()).ShouldNot(HaveOccurred())
+
+			t := IteratorTesting{
+				KeyValue: _kv,
+				Iter:     iter,
+			}
+
+			DoIteratorTesting(&t)
+			iter.Release()
+		}
+	}
+
+	It("Should iterates and seeks correctly", func(done Done) {
+		TestIter(nil, kv.Clone())
+		done <- true
+	}, 3.0)
+
+	RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) {
+		type slice struct {
+			r            *util.Range
+			start, limit int
+		}
+
+		key_, _, _ := kv.IndexInexact(i)
+		for _, x := range []slice{
+			{&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
+			{&util.Range{Start: nil, Limit: key_}, 0, i},
+		} {
+			It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) {
+				TestIter(x.r, kv.Slice(x.start, x.limit))
+				done <- true
+			}, 3.0)
+		}
+	})
+
+	RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) {
+		It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) {
+			r := kv.Range(start, limit)
+			TestIter(&r, kv.Slice(start, limit))
+			done <- true
+		}, 3.0)
+	})
+}
+
+func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) {
+	Test := func(kv *KeyValue) func() {
+		return func() {
+			var p DB
+			if setup != nil {
+				Defer("setup", func() {
+					p = setup(*kv)
+				})
+			}
+			if teardown != nil {
+				Defer("teardown", func() {
+					teardown(p)
+				})
+			}
+			if body != nil {
+				p = body(*kv)
+			}
+			KeyValueTesting(rnd, *kv, p, func(KeyValue) DB {
+				return p
+			}, nil)
+		}
+	}
+
+	Describe("with no key/value (empty)", Test(&KeyValue{}))
+	Describe("with empty key", Test(KeyValue_EmptyKey()))
+	Describe("with empty value", Test(KeyValue_EmptyValue()))
+	Describe("with one key/value", Test(KeyValue_OneKeyValue()))
+	Describe("with big value", Test(KeyValue_BigValue()))
+	Describe("with special key", Test(KeyValue_SpecialKey()))
+	Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
+	Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120)))
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
new file mode 100644
index 0000000..2458e1c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
@@ -0,0 +1,586 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package testutil
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	storageMu     sync.Mutex
+	storageUseFS  bool = true
+	storageKeepFS bool = false
+	storageNum    int
+)
+
+type StorageMode int
+
+const (
+	ModeOpen StorageMode = 1 << iota
+	ModeCreate
+	ModeRemove
+	ModeRead
+	ModeWrite
+	ModeSync
+	ModeClose
+)
+
+const (
+	modeOpen = iota
+	modeCreate
+	modeRemove
+	modeRead
+	modeWrite
+	modeSync
+	modeClose
+
+	modeCount
+)
+
+const (
+	typeManifest = iota
+	typeJournal
+	typeTable
+	typeTemp
+
+	typeCount
+)
+
+const flattenCount = modeCount * typeCount
+
+func flattenType(m StorageMode, t storage.FileType) int {
+	var x int
+	switch m {
+	case ModeOpen:
+		x = modeOpen
+	case ModeCreate:
+		x = modeCreate
+	case ModeRemove:
+		x = modeRemove
+	case ModeRead:
+		x = modeRead
+	case ModeWrite:
+		x = modeWrite
+	case ModeSync:
+		x = modeSync
+	case ModeClose:
+		x = modeClose
+	default:
+		panic("invalid storage mode")
+	}
+	x *= typeCount
+	switch t {
+	case storage.TypeManifest:
+		return x + typeManifest
+	case storage.TypeJournal:
+		return x + typeJournal
+	case storage.TypeTable:
+		return x + typeTable
+	case storage.TypeTemp:
+		return x + typeTemp
+	default:
+		panic("invalid file type")
+	}
+}
+
+func listFlattenType(m StorageMode, t storage.FileType) []int {
+	ret := make([]int, 0, flattenCount)
+	add := func(x int) {
+		x *= typeCount
+		switch {
+		case t&storage.TypeManifest != 0:
+			ret = append(ret, x+typeManifest)
+		case t&storage.TypeJournal != 0:
+			ret = append(ret, x+typeJournal)
+		case t&storage.TypeTable != 0:
+			ret = append(ret, x+typeTable)
+		case t&storage.TypeTemp != 0:
+			ret = append(ret, x+typeTemp)
+		}
+	}
+	switch {
+	case m&ModeOpen != 0:
+		add(modeOpen)
+	case m&ModeCreate != 0:
+		add(modeCreate)
+	case m&ModeRemove != 0:
+		add(modeRemove)
+	case m&ModeRead != 0:
+		add(modeRead)
+	case m&ModeWrite != 0:
+		add(modeWrite)
+	case m&ModeSync != 0:
+		add(modeSync)
+	case m&ModeClose != 0:
+		add(modeClose)
+	}
+	return ret
+}
+
+func packFile(num uint64, t storage.FileType) uint64 {
+	if num>>(64-typeCount) != 0 {
+		panic("overflow")
+	}
+	return num<<typeCount | uint64(t)
+}
+
+func unpackFile(x uint64) (uint64, storage.FileType) {
+	return x >> typeCount, storage.FileType(x) & storage.TypeAll
+}
+
+type emulatedError struct {
+	err error
+}
+
+func (err emulatedError) Error() string {
+	return fmt.Sprintf("emulated storage error: %v", err.err)
+}
+
+type storageLock struct {
+	s *Storage
+	r util.Releaser
+}
+
+func (l storageLock) Release() {
+	l.r.Release()
+	l.s.logI("storage lock released")
+}
+
+type reader struct {
+	f *file
+	storage.Reader
+}
+
+func (r *reader) Read(p []byte) (n int, err error) {
+	err = r.f.s.emulateError(ModeRead, r.f.Type())
+	if err == nil {
+		r.f.s.stall(ModeRead, r.f.Type())
+		n, err = r.Reader.Read(p)
+	}
+	r.f.s.count(ModeRead, r.f.Type(), n)
+	if err != nil && err != io.EOF {
+		r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err)
+	}
+	return
+}
+
+func (r *reader) ReadAt(p []byte, off int64) (n int, err error) {
+	err = r.f.s.emulateError(ModeRead, r.f.Type())
+	if err == nil {
+		r.f.s.stall(ModeRead, r.f.Type())
+		n, err = r.Reader.ReadAt(p, off)
+	}
+	r.f.s.count(ModeRead, r.f.Type(), n)
+	if err != nil && err != io.EOF {
+		r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err)
+	}
+	return
+}
+
+func (r *reader) Close() (err error) {
+	return r.f.doClose(r.Reader)
+}
+
+type writer struct {
+	f *file
+	storage.Writer
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+	err = w.f.s.emulateError(ModeWrite, w.f.Type())
+	if err == nil {
+		w.f.s.stall(ModeWrite, w.f.Type())
+		n, err = w.Writer.Write(p)
+	}
+	w.f.s.count(ModeWrite, w.f.Type(), n)
+	if err != nil && err != io.EOF {
+		w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err)
+	}
+	return
+}
+
+func (w *writer) Sync() (err error) {
+	err = w.f.s.emulateError(ModeSync, w.f.Type())
+	if err == nil {
+		w.f.s.stall(ModeSync, w.f.Type())
+		err = w.Writer.Sync()
+	}
+	w.f.s.count(ModeSync, w.f.Type(), 0)
+	if err != nil {
+		w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err)
+	}
+	return
+}
+
+func (w *writer) Close() (err error) {
+	return w.f.doClose(w.Writer)
+}
+
+type file struct {
+	s *Storage
+	storage.File
+}
+
+func (f *file) pack() uint64 {
+	return packFile(f.Num(), f.Type())
+}
+
+func (f *file) assertOpen() {
+	ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()])
+}
+
+func (f *file) doClose(closer io.Closer) (err error) {
+	err = f.s.emulateError(ModeClose, f.Type())
+	if err == nil {
+		f.s.stall(ModeClose, f.Type())
+	}
+	f.s.mu.Lock()
+	defer f.s.mu.Unlock()
+	if err == nil {
+		ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type())
+		err = closer.Close()
+	}
+	f.s.countNB(ModeClose, f.Type(), 0)
+	writer := f.s.opens[f.pack()]
+	if err != nil {
+		f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err)
+	} else {
+		f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer)
+		delete(f.s.opens, f.pack())
+	}
+	return
+}
+
+func (f *file) Open() (r storage.Reader, err error) {
+	err = f.s.emulateError(ModeOpen, f.Type())
+	if err == nil {
+		f.s.stall(ModeOpen, f.Type())
+	}
+	f.s.mu.Lock()
+	defer f.s.mu.Unlock()
+	if err == nil {
+		f.assertOpen()
+		f.s.countNB(ModeOpen, f.Type(), 0)
+		r, err = f.File.Open()
+	}
+	if err != nil {
+		f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err)
+	} else {
+		f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type())
+		f.s.opens[f.pack()] = false
+		r = &reader{f, r}
+	}
+	return
+}
+
+func (f *file) Create() (w storage.Writer, err error) {
+	err = f.s.emulateError(ModeCreate, f.Type())
+	if err == nil {
+		f.s.stall(ModeCreate, f.Type())
+	}
+	f.s.mu.Lock()
+	defer f.s.mu.Unlock()
+	if err == nil {
+		f.assertOpen()
+		f.s.countNB(ModeCreate, f.Type(), 0)
+		w, err = f.File.Create()
+	}
+	if err != nil {
+		f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err)
+	} else {
+		f.s.logI("file created, num=%d type=%v", f.Num(), f.Type())
+		f.s.opens[f.pack()] = true
+		w = &writer{f, w}
+	}
+	return
+}
+
+func (f *file) Remove() (err error) {
+	err = f.s.emulateError(ModeRemove, f.Type())
+	if err == nil {
+		f.s.stall(ModeRemove, f.Type())
+	}
+	f.s.mu.Lock()
+	defer f.s.mu.Unlock()
+	if err == nil {
+		f.assertOpen()
+		f.s.countNB(ModeRemove, f.Type(), 0)
+		err = f.File.Remove()
+	}
+	if err != nil {
+		f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err)
+	} else {
+		f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type())
+	}
+	return
+}
+
+type Storage struct {
+	storage.Storage
+	closeFn func() error
+
+	lmu sync.Mutex
+	lb  bytes.Buffer
+
+	mu sync.Mutex
+	// Open files, true=writer, false=reader
+	opens         map[uint64]bool
+	counters      [flattenCount]int
+	bytesCounter  [flattenCount]int64
+	emulatedError [flattenCount]error
+	stallCond     sync.Cond
+	stalled       [flattenCount]bool
+}
+
+func (s *Storage) log(skip int, str string) {
+	s.lmu.Lock()
+	defer s.lmu.Unlock()
+	_, file, line, ok := runtime.Caller(skip + 2)
+	if ok {
+		// Truncate file name at last file name separator.
+		if index := strings.LastIndex(file, "/"); index >= 0 {
+			file = file[index+1:]
+		} else if index = strings.LastIndex(file, "\\"); index >= 0 {
+			file = file[index+1:]
+		}
+	} else {
+		file = "???"
+		line = 1
+	}
+	fmt.Fprintf(&s.lb, "%s:%d: ", file, line)
+	lines := strings.Split(str, "\n")
+	if l := len(lines); l > 1 && lines[l-1] == "" {
+		lines = lines[:l-1]
+	}
+	for i, line := range lines {
+		if i > 0 {
+			s.lb.WriteString("\n\t")
+		}
+		s.lb.WriteString(line)
+	}
+	s.lb.WriteByte('\n')
+}
+
+func (s *Storage) logISkip(skip int, format string, args ...interface{}) {
+	pc, _, _, ok := runtime.Caller(skip + 1)
+	if ok {
+		if f := runtime.FuncForPC(pc); f != nil {
+			fname := f.Name()
+			if index := strings.LastIndex(fname, "."); index >= 0 {
+				fname = fname[index+1:]
+			}
+			format = fname + ": " + format
+		}
+	}
+	s.log(skip+1, fmt.Sprintf(format, args...))
+}
+
+func (s *Storage) logI(format string, args ...interface{}) {
+	s.logISkip(1, format, args...)
+}
+
+func (s *Storage) Log(str string) {
+	s.log(1, "Log: "+str)
+	s.Storage.Log(str)
+}
+
+func (s *Storage) Lock() (r util.Releaser, err error) {
+	r, err = s.Storage.Lock()
+	if err != nil {
+		s.logI("storage locking failed, err=%v", err)
+	} else {
+		s.logI("storage locked")
+		r = storageLock{s, r}
+	}
+	return
+}
+
+func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File {
+	return &file{s, s.Storage.GetFile(num, t)}
+}
+
+func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) {
+	rfiles, err := s.Storage.GetFiles(t)
+	if err != nil {
+		s.logI("get files failed, err=%v", err)
+		return
+	}
+	files = make([]storage.File, len(rfiles))
+	for i, f := range rfiles {
+		files[i] = &file{s, f}
+	}
+	s.logI("get files, type=0x%x count=%d", int(t), len(files))
+	return
+}
+
+func (s *Storage) GetManifest() (f storage.File, err error) {
+	manifest, err := s.Storage.GetManifest()
+	if err != nil {
+		if !os.IsNotExist(err) {
+			s.logI("get manifest failed, err=%v", err)
+		}
+		return
+	}
+	s.logI("get manifest, num=%d", manifest.Num())
+	return &file{s, manifest}, nil
+}
+
+func (s *Storage) SetManifest(f storage.File) error {
+	f_, ok := f.(*file)
+	ExpectWithOffset(1, ok).To(BeTrue())
+	ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest))
+	err := s.Storage.SetManifest(f_.File)
+	if err != nil {
+		s.logI("set manifest failed, err=%v", err)
+	} else {
+		s.logI("set manifest, num=%d", f_.Num())
+	}
+	return err
+}
+
+func (s *Storage) openFiles() string {
+	out := "Open files:"
+	for x, writer := range s.opens {
+		num, t := unpackFile(x)
+		out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer)
+	}
+	return out
+}
+
+func (s *Storage) Close() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles())
+	err := s.Storage.Close()
+	if err != nil {
+		s.logI("storage closing failed, err=%v", err)
+	} else {
+		s.logI("storage closed")
+	}
+	if s.closeFn != nil {
+		if err1 := s.closeFn(); err1 != nil {
+			s.logI("close func error, err=%v", err1)
+		}
+	}
+	return err
+}
+
+func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) {
+	s.counters[flattenType(m, t)]++
+	s.bytesCounter[flattenType(m, t)] += int64(n)
+}
+
+func (s *Storage) count(m StorageMode, t storage.FileType, n int) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.countNB(m, t, n)
+}
+
+func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) {
+	for _, x := range listFlattenType(m, t) {
+		s.counters[x] = 0
+		s.bytesCounter[x] = 0
+	}
+}
+
+func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) {
+	for _, x := range listFlattenType(m, t) {
+		count += s.counters[x]
+		bytes += s.bytesCounter[x]
+	}
+	return
+}
+
+func (s *Storage) emulateError(m StorageMode, t storage.FileType) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	err := s.emulatedError[flattenType(m, t)]
+	if err != nil {
+		return emulatedError{err}
+	}
+	return nil
+}
+
+func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	for _, x := range listFlattenType(m, t) {
+		s.emulatedError[x] = err
+	}
+}
+
+func (s *Storage) stall(m StorageMode, t storage.FileType) {
+	x := flattenType(m, t)
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	for s.stalled[x] {
+		s.stallCond.Wait()
+	}
+}
+
+func (s *Storage) Stall(m StorageMode, t storage.FileType) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	for _, x := range listFlattenType(m, t) {
+		s.stalled[x] = true
+	}
+}
+
+func (s *Storage) Release(m StorageMode, t storage.FileType) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	for _, x := range listFlattenType(m, t) {
+		s.stalled[x] = false
+	}
+	s.stallCond.Broadcast()
+}
+
+func NewStorage() *Storage {
+	var stor storage.Storage
+	var closeFn func() error
+	if storageUseFS {
+		for {
+			storageMu.Lock()
+			num := storageNum
+			storageNum++
+			storageMu.Unlock()
+			path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
+			if _, err := os.Stat(path); os.IsNotExist(err) {
+				stor, err = storage.OpenFile(path)
+				ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path)
+				closeFn = func() error {
+					if storageKeepFS {
+						return nil
+					}
+					return os.RemoveAll(path)
+				}
+				break
+			}
+		}
+	} else {
+		stor = storage.NewMemStorage()
+	}
+	s := &Storage{
+		Storage: stor,
+		closeFn: closeFn,
+		opens:   make(map[uint64]bool),
+	}
+	s.stallCond.L = &s.mu
+	return s
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
new file mode 100644
index 0000000..8eef32c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package testutil
+
+import (
+	"bytes"
+	"flag"
+	"math/rand"
+	"reflect"
+	"sync"
+
+	"github.com/onsi/ginkgo/config"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+)
+
+var (
+	runfn = make(map[string][]func())
+	runmu sync.Mutex
+)
+
+func Defer(args ...interface{}) bool {
+	var (
+		group string
+		fn    func()
+	)
+	for _, arg := range args {
+		v := reflect.ValueOf(arg)
+		switch v.Kind() {
+		case reflect.String:
+			group = v.String()
+		case reflect.Func:
+			r := reflect.ValueOf(&fn).Elem()
+			r.Set(v)
+		}
+	}
+	if fn != nil {
+		runmu.Lock()
+		runfn[group] = append(runfn[group], fn)
+		runmu.Unlock()
+	}
+	return true
+}
+
+func RunDefer(groups ...string) bool {
+	if len(groups) == 0 {
+		groups = append(groups, "")
+	}
+	runmu.Lock()
+	var runfn_ []func()
+	for _, group := range groups {
+		runfn_ = append(runfn_, runfn[group]...)
+		delete(runfn, group)
+	}
+	runmu.Unlock()
+	for _, fn := range runfn_ {
+		fn()
+	}
+	return runfn_ != nil
+}
+
+func RandomSeed() int64 {
+	if !flag.Parsed() {
+		panic("random seed not initialized")
+	}
+	return config.GinkgoConfig.RandomSeed
+}
+
+func NewRand() *rand.Rand {
+	return rand.New(rand.NewSource(RandomSeed()))
+}
+
+var cmp = comparer.DefaultComparer
+
+func BytesSeparator(a, b []byte) []byte {
+	if bytes.Equal(a, b) {
+		return b
+	}
+	i, n := 0, len(a)
+	if n > len(b) {
+		n = len(b)
+	}
+	for ; i < n && (a[i] == b[i]); i++ {
+	}
+	x := append([]byte{}, a[:i]...)
+	if i < n {
+		if c := a[i] + 1; c < b[i] {
+			return append(x, c)
+		}
+		x = append(x, a[i])
+		i++
+	}
+	for ; i < len(a); i++ {
+		if c := a[i]; c < 0xff {
+			return append(x, c+1)
+		} else {
+			x = append(x, c)
+		}
+	}
+	if len(b) > i && b[i] > 0 {
+		return append(x, b[i]-1)
+	}
+	return append(x, 'x')
+}
+
+func BytesAfter(b []byte) []byte {
+	var x []byte
+	for _, c := range b {
+		if c < 0xff {
+			return append(x, c+1)
+		} else {
+			x = append(x, c)
+		}
+	}
+	return append(x, 'x')
+}
+
+func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) {
+	if rnd == nil {
+		rnd = NewRand()
+	}
+	for x := 0; x < round; x++ {
+		fn(rnd.Intn(n))
+	}
+	return
+}
+
+func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) {
+	if rnd == nil {
+		rnd = NewRand()
+	}
+	for x := 0; x < round; x++ {
+		for _, i := range rnd.Perm(n) {
+			fn(i)
+		}
+	}
+	return
+}
+
+func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) {
+	if rnd == nil {
+		rnd = NewRand()
+	}
+	for x := 0; x < round; x++ {
+		start := rnd.Intn(n)
+		length := 0
+		if j := n - start; j > 0 {
+			length = rnd.Intn(j)
+		}
+		fn(start, start+length)
+	}
+	return
+}
+
+func Max(x, y int) int {
+	if x > y {
+		return x
+	}
+	return y
+}
+
+func Min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
new file mode 100644
index 0000000..0114580
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	. "github.com/onsi/gomega"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type testingDB struct {
+	*DB
+	ro   *opt.ReadOptions
+	wo   *opt.WriteOptions
+	stor *testutil.Storage
+}
+
+func (t *testingDB) TestPut(key []byte, value []byte) error {
+	return t.Put(key, value, t.wo)
+}
+
+func (t *testingDB) TestDelete(key []byte) error {
+	return t.Delete(key, t.wo)
+}
+
+func (t *testingDB) TestGet(key []byte) (value []byte, err error) {
+	return t.Get(key, t.ro)
+}
+
+func (t *testingDB) TestHas(key []byte) (ret bool, err error) {
+	return t.Has(key, t.ro)
+}
+
+func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator {
+	return t.NewIterator(slice, t.ro)
+}
+
+func (t *testingDB) TestClose() {
+	err := t.Close()
+	ExpectWithOffset(1, err).NotTo(HaveOccurred())
+	err = t.stor.Close()
+	ExpectWithOffset(1, err).NotTo(HaveOccurred())
+}
+
+func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB {
+	stor := testutil.NewStorage()
+	db, err := Open(stor, o)
+	// FIXME: This may be called from outside It, which may cause panic.
+	Expect(err).NotTo(HaveOccurred())
+	return &testingDB{
+		DB:   db,
+		ro:   ro,
+		wo:   wo,
+		stor: stor,
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go
new file mode 100644
index 0000000..21f4f24
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go
@@ -0,0 +1,91 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"fmt"
+	"sort"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+func shorten(str string) string {
+	if len(str) <= 8 {
+		return str
+	}
+	return str[:3] + ".." + str[len(str)-3:]
+}
+
+var bunits = [...]string{"", "Ki", "Mi", "Gi"}
+
+func shortenb(bytes int) string {
+	i := 0
+	for ; bytes > 1024 && i < len(bunits)-1; i++ { // cap at last unit; "i < 4" let i reach 4 and index past bunits
+		bytes /= 1024
+	}
+	return fmt.Sprintf("%d%sB", bytes, bunits[i])
+}
+
+func sshortenb(bytes int) string {
+	if bytes == 0 {
+		return "~"
+	}
+	sign := "+"
+	if bytes < 0 {
+		sign = "-"
+		bytes *= -1
+	}
+	i := 0
+	for ; bytes > 1024 && i < len(bunits)-1; i++ { // cap at last unit; "i < 4" let i reach 4 and index past bunits
+		bytes /= 1024
+	}
+	return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i])
+}
+
+func sint(x int) string {
+	if x == 0 {
+		return "~"
+	}
+	sign := "+"
+	if x < 0 {
+		sign = "-"
+		x *= -1
+	}
+	return fmt.Sprintf("%s%d", sign, x)
+}
+
+func minInt(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func maxInt(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+type files []storage.File
+
+func (p files) Len() int {
+	return len(p)
+}
+
+func (p files) Less(i, j int) bool {
+	return p[i].Num() < p[j].Num()
+}
+
+func (p files) Swap(i, j int) {
+	p[i], p[j] = p[j], p[i]
+}
+
+func (p files) sort() {
+	sort.Sort(p)
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go
new file mode 100644
index 0000000..21de242
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go
@@ -0,0 +1,293 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package util
+
+// This is a copy of Go std bytes.Buffer with some modification
+// and some features stripped.
+
+import (
+	"bytes"
+	"io"
+)
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+	buf       []byte   // contents are the bytes buf[off : len(buf)]
+	off       int      // read at &buf[off], write at &buf[len(buf)]
+	bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
+}
+
+// Bytes returns a slice of the contents of the unread portion of the buffer;
+// len(b.Bytes()) == b.Len().  If the caller changes the contents of the
+// returned slice, the contents of the buffer will change provided there
+// are no intervening method calls on the Buffer.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string.  If the Buffer is a nil pointer, it returns "<nil>".
+func (b *Buffer) String() string {
+	if b == nil {
+		// Special case, useful in debugging.
+		return "<nil>"
+	}
+	return string(b.buf[b.off:])
+}
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Truncate discards all but the first n unread bytes from the buffer.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+	switch {
+	case n < 0 || n > b.Len():
+		panic("leveldb/util.Buffer: truncation out of range")
+	case n == 0:
+		// Reuse buffer space.
+		b.off = 0
+	}
+	b.buf = b.buf[0 : b.off+n]
+}
+
+// Reset resets the buffer so it has no content.
+// b.Reset() is the same as b.Truncate(0).
+func (b *Buffer) Reset() { b.Truncate(0) }
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+	m := b.Len()
+	// If buffer is empty, reset to recover space.
+	if m == 0 && b.off != 0 {
+		b.Truncate(0)
+	}
+	if len(b.buf)+n > cap(b.buf) {
+		var buf []byte
+		if b.buf == nil && n <= len(b.bootstrap) {
+			buf = b.bootstrap[0:]
+		} else if m+n <= cap(b.buf)/2 {
+			// We can slide things down instead of allocating a new
+			// slice. We only need m+n <= cap(b.buf) to slide, but
+			// we instead let capacity get twice as large so we
+			// don't spend all our time copying.
+			copy(b.buf[:], b.buf[b.off:])
+			buf = b.buf[:m]
+		} else {
+			// not enough space anywhere
+			buf = makeSlice(2*cap(b.buf) + n)
+			copy(buf, b.buf[b.off:])
+		}
+		b.buf = buf
+		b.off = 0
+	}
+	b.buf = b.buf[0 : b.off+m+n]
+	return b.off + m
+}
+
+// Alloc allocates n bytes of slice from the buffer, growing the buffer as
+// needed. If n is negative, Alloc will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Alloc(n int) []byte {
+	if n < 0 {
+		panic("leveldb/util.Buffer.Alloc: negative count")
+	}
+	m := b.grow(n)
+	return b.buf[m:]
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+	if n < 0 {
+		panic("leveldb/util.Buffer.Grow: negative count")
+	}
+	m := b.grow(n)
+	b.buf = b.buf[0:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with bytes.ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+	m := b.grow(len(p))
+	return copy(b.buf[m:], p), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom.  As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+	// If buffer is empty, reset to recover space.
+	if b.off >= len(b.buf) {
+		b.Truncate(0)
+	}
+	for {
+		if free := cap(b.buf) - len(b.buf); free < MinRead {
+			// Not enough free space at the end of the buffer.
+			newBuf := b.buf
+			if b.off+free < MinRead {
+				// Not enough space even after reclaiming the read-off prefix;
+				// double the buffer capacity instead.
+				newBuf = makeSlice(2*cap(b.buf) + MinRead)
+			}
+			copy(newBuf, b.buf[b.off:])
+			b.buf = newBuf[:len(b.buf)-b.off]
+			b.off = 0
+		}
+		m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
+		b.buf = b.buf[0 : len(b.buf)+m]
+		n += int64(m)
+		if e == io.EOF {
+			break
+		}
+		if e != nil {
+			return n, e
+		}
+	}
+	return n, nil // err is EOF, so return nil explicitly
+}
+
+// makeSlice allocates a slice of size n. If the allocation fails, it panics
+// with bytes.ErrTooLarge.
+func makeSlice(n int) []byte {
+	// If the make fails, give a known error.
+	defer func() {
+		if recover() != nil {
+			panic(bytes.ErrTooLarge)
+		}
+	}()
+	return make([]byte, n)
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+	if b.off < len(b.buf) {
+		nBytes := b.Len()
+		m, e := w.Write(b.buf[b.off:])
+		if m > nBytes {
+			panic("leveldb/util.Buffer.WriteTo: invalid Write count")
+		}
+		b.off += m
+		n = int64(m)
+		if e != nil {
+			return n, e
+		}
+		// All bytes should have been written, by definition of the
+		// Write method in io.Writer.
+		if m != nBytes {
+			return n, io.ErrShortWrite
+		}
+	}
+	// Buffer is now empty; reset.
+	b.Truncate(0)
+	return
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// bytes.ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+	m := b.grow(1)
+	b.buf[m] = c
+	return nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained.  The return value n is the number of bytes read.  If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+	if b.off >= len(b.buf) {
+		// Buffer is empty, reset to recover space.
+		b.Truncate(0)
+		if len(p) == 0 {
+			return
+		}
+		return 0, io.EOF
+	}
+	n = copy(p, b.buf[b.off:])
+	b.off += n
+	return
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+	m := b.Len()
+	if n > m {
+		n = m
+	}
+	data := b.buf[b.off : b.off+n]
+	b.off += n
+	return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (c byte, err error) {
+	if b.off >= len(b.buf) {
+		// Buffer is empty, reset to recover space.
+		b.Truncate(0)
+		return 0, io.EOF
+	}
+	c = b.buf[b.off]
+	b.off++
+	return c, nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+	slice, err := b.readSlice(delim)
+	// Return a copy of slice; the buffer's backing array may
+	// be overwritten by later calls.
+	line = append(line, slice...)
+	return
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+	i := bytes.IndexByte(b.buf[b.off:], delim)
+	end := b.off + i + 1
+	if i < 0 {
+		end = len(b.buf)
+		err = io.EOF
+	}
+	line = b.buf[b.off:end]
+	b.off = end
+	return line, err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial
+// contents.  It is intended to prepare a Buffer to read existing data.  It
+// can also be used to size the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
new file mode 100644
index 0000000..2f3db97
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
@@ -0,0 +1,239 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type buffer struct { // pairs a byte slice with a miss counter; not referenced elsewhere in this file.
+	b    []byte
+	miss int
+}
+
+// BufferPool is a 'buffer pool'.
+type BufferPool struct {
+	pool      [6]chan []byte // free lists, one per size class (class 0 is the fast path).
+	size      [5]uint32      // preferred allocation size per non-zero class; updated atomically.
+	sizeMiss  [5]uint32      // Gets larger than size since last bump (at 20, size is raised).
+	sizeHalf  [5]uint32      // oversized hits since last shrink (at 20, size is halved).
+	baseline  [4]int         // size-class upper bounds derived from baseline0.
+	baseline0 int            // nominal buffer size served by class 0.
+
+	mu     sync.RWMutex // guards closed; held for read across Get/Put.
+	closed bool
+	closeC chan struct{} // signals the drain goroutine to exit.
+
+	get     uint32 // statistics below are updated atomically; see String.
+	put     uint32
+	half    uint32
+	less    uint32
+	equal   uint32
+	greater uint32
+	miss    uint32
+}
+
+func (p *BufferPool) poolNum(n int) int { // maps a requested size n to a pool (size-class) index.
+	if n <= p.baseline0 && n > p.baseline0/2 {
+		return 0
+	}
+	for i, x := range p.baseline {
+		if n <= x {
+			return i + 1
+		}
+	}
+	return len(p.baseline) + 1
+}
+
+// Get returns buffer with length of n.
+func (p *BufferPool) Get(n int) []byte {
+	if p == nil {
+		return make([]byte, n)
+	}
+
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	if p.closed {
+		return make([]byte, n)
+	}
+
+	atomic.AddUint32(&p.get, 1)
+
+	poolNum := p.poolNum(n)
+	pool := p.pool[poolNum]
+	if poolNum == 0 {
+		// Fast path.
+		select {
+		case b := <-pool:
+			switch {
+			case cap(b) > n:
+				if cap(b)-n >= n { // more than half would be wasted; recycle it and allocate exact size.
+					atomic.AddUint32(&p.half, 1)
+					select {
+					case pool <- b:
+					default:
+					}
+					return make([]byte, n)
+				} else {
+					atomic.AddUint32(&p.less, 1)
+					return b[:n]
+				}
+			case cap(b) == n:
+				atomic.AddUint32(&p.equal, 1)
+				return b[:n]
+			default:
+				atomic.AddUint32(&p.greater, 1)
+			}
+		default:
+			atomic.AddUint32(&p.miss, 1)
+		}
+
+		return make([]byte, n, p.baseline0)
+	} else {
+		sizePtr := &p.size[poolNum-1]
+
+		select {
+		case b := <-pool:
+			switch {
+			case cap(b) > n:
+				if cap(b)-n >= n { // more than half would be wasted; track and possibly shrink the class size.
+					atomic.AddUint32(&p.half, 1)
+					sizeHalfPtr := &p.sizeHalf[poolNum-1]
+					if atomic.AddUint32(sizeHalfPtr, 1) == 20 { // after 20 oversized hits, halve the preferred size.
+						atomic.StoreUint32(sizePtr, uint32(cap(b)/2))
+						atomic.StoreUint32(sizeHalfPtr, 0)
+					} else {
+						select {
+						case pool <- b:
+						default:
+						}
+					}
+					return make([]byte, n)
+				} else {
+					atomic.AddUint32(&p.less, 1)
+					return b[:n]
+				}
+			case cap(b) == n:
+				atomic.AddUint32(&p.equal, 1)
+				return b[:n]
+			default:
+				atomic.AddUint32(&p.greater, 1)
+				if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
+					select {
+					case pool <- b:
+					default:
+					}
+				}
+			}
+		default:
+			atomic.AddUint32(&p.miss, 1)
+		}
+
+		if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
+			if size == 0 {
+				atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
+			} else {
+				sizeMissPtr := &p.sizeMiss[poolNum-1]
+				if atomic.AddUint32(sizeMissPtr, 1) == 20 { // after 20 misses, raise the preferred size to n.
+					atomic.StoreUint32(sizePtr, uint32(n))
+					atomic.StoreUint32(sizeMissPtr, 0)
+				}
+			}
+			return make([]byte, n)
+		} else {
+			return make([]byte, n, size)
+		}
+	}
+}
+
+// Put adds given buffer to the pool.
+func (p *BufferPool) Put(b []byte) {
+	if p == nil {
+		return
+	}
+
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	if p.closed {
+		return
+	}
+
+	atomic.AddUint32(&p.put, 1)
+
+	pool := p.pool[p.poolNum(cap(b))]
+	select {
+	case pool <- b:
+	default: // class is full; drop the buffer.
+	}
+
+}
+
+func (p *BufferPool) Close() { // Close marks the pool closed and signals drain to stop.
+	if p == nil {
+		return
+	}
+
+	p.mu.Lock()
+	if !p.closed {
+		p.closed = true
+		p.closeC <- struct{}{}
+	}
+	p.mu.Unlock()
+}
+
+func (p *BufferPool) String() string { // String formats the pool configuration and statistics.
+	if p == nil {
+		return "<nil>"
+	}
+
+	return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}",
+		p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss)
+}
+
+func (p *BufferPool) drain() { // drain evicts one buffer from each class every 2s until closed.
+	ticker := time.NewTicker(2 * time.Second)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			for _, ch := range p.pool {
+				select {
+				case <-ch:
+				default:
+				}
+			}
+		case <-p.closeC:
+			close(p.closeC)
+			for _, ch := range p.pool {
+				close(ch)
+			}
+			return
+		}
+	}
+}
+
+// NewBufferPool creates a new initialized 'buffer pool'.
+func NewBufferPool(baseline int) *BufferPool {
+	if baseline <= 0 {
+		panic("baseline can't be <= 0")
+	}
+	p := &BufferPool{
+		baseline0: baseline,
+		baseline:  [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4},
+		closeC:    make(chan struct{}, 1),
+	}
+	for i, cap := range []int{2, 2, 4, 4, 2, 1} { // NOTE(review): loop variable cap shadows the builtin.
+		p.pool[i] = make(chan []byte, cap)
+	}
+	go p.drain()
+	return p
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go
new file mode 100644
index 0000000..87d9673
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go
@@ -0,0 +1,369 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+	"bytes"
+	"io"
+	"math/rand"
+	"runtime"
+	"testing"
+)
+
+const N = 10000      // make this bigger for a larger (and slower) test
+var data string      // test data for write tests
+var testBytes []byte // test data; same as data but as a slice.
+
+func init() { // fills testBytes/data with a repeating 'a'..'z' pattern.
+	testBytes = make([]byte, N)
+	for i := 0; i < N; i++ {
+		testBytes[i] = 'a' + byte(i%26)
+	}
+	data = string(testBytes)
+}
+
+// Verify that contents of buf match the string s.
+func check(t *testing.T, testname string, buf *Buffer, s string) {
+	bytes := buf.Bytes()
+	str := buf.String()
+	if buf.Len() != len(bytes) {
+		t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
+	}
+
+	if buf.Len() != len(str) {
+		t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
+	}
+
+	if buf.Len() != len(s) {
+		t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
+	}
+
+	if string(bytes) != s {
+		t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
+	}
+}
+
+// Fill buf through n writes of byte slice fub.
+// The initial contents of buf corresponds to the string s;
+// the result is the final contents of buf returned as a string.
+func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
+	check(t, testname+" (fill 1)", buf, s)
+	for ; n > 0; n-- {
+		m, err := buf.Write(fub)
+		if m != len(fub) {
+			t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
+		}
+		if err != nil {
+			t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
+		}
+		s += string(fub)
+		check(t, testname+" (fill 4)", buf, s)
+	}
+	return s
+}
+
+func TestNewBuffer(t *testing.T) {
+	buf := NewBuffer(testBytes)
+	check(t, "NewBuffer", buf, data)
+}
+
+// Empty buf through repeated reads into fub.
+// The initial contents of buf corresponds to the string s.
+func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
+	check(t, testname+" (empty 1)", buf, s)
+
+	for {
+		n, err := buf.Read(fub)
+		if n == 0 {
+			break
+		}
+		if err != nil {
+			t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
+		}
+		s = s[n:]
+		check(t, testname+" (empty 3)", buf, s)
+	}
+
+	check(t, testname+" (empty 4)", buf, "")
+}
+
+func TestBasicOperations(t *testing.T) {
+	var buf Buffer
+
+	for i := 0; i < 5; i++ {
+		check(t, "TestBasicOperations (1)", &buf, "")
+
+		buf.Reset()
+		check(t, "TestBasicOperations (2)", &buf, "")
+
+		buf.Truncate(0)
+		check(t, "TestBasicOperations (3)", &buf, "")
+
+		n, err := buf.Write([]byte(data[0:1]))
+		if n != 1 {
+			t.Errorf("wrote 1 byte, but n == %d", n)
+		}
+		if err != nil {
+			t.Errorf("err should always be nil, but err == %s", err)
+		}
+		check(t, "TestBasicOperations (4)", &buf, "a")
+
+		buf.WriteByte(data[1])
+		check(t, "TestBasicOperations (5)", &buf, "ab")
+
+		n, err = buf.Write([]byte(data[2:26]))
+		if n != 24 {
+			t.Errorf("wrote 25 bytes, but n == %d", n)
+		}
+		check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
+
+		buf.Truncate(26)
+		check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
+
+		buf.Truncate(20)
+		check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
+
+		empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
+		empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
+
+		buf.WriteByte(data[1])
+		c, err := buf.ReadByte()
+		if err != nil {
+			t.Error("ReadByte unexpected eof")
+		}
+		if c != data[1] {
+			t.Errorf("ReadByte wrong value c=%v", c)
+		}
+		c, err = buf.ReadByte() // c is intentionally ignored; only the EOF error matters here.
+		if err == nil {
+			t.Error("ReadByte unexpected not eof")
+		}
+	}
+}
+
+func TestLargeByteWrites(t *testing.T) {
+	var buf Buffer
+	limit := 30
+	if testing.Short() {
+		limit = 9
+	}
+	for i := 3; i < limit; i += 3 {
+		s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
+		empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
+	}
+	check(t, "TestLargeByteWrites (3)", &buf, "")
+}
+
+func TestLargeByteReads(t *testing.T) {
+	var buf Buffer
+	for i := 3; i < 30; i += 3 {
+		s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+		empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
+	}
+	check(t, "TestLargeByteReads (3)", &buf, "")
+}
+
+func TestMixedReadsAndWrites(t *testing.T) {
+	var buf Buffer
+	s := ""
+	for i := 0; i < 50; i++ {
+		wlen := rand.Intn(len(data))
+		s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
+		rlen := rand.Intn(len(data))
+		fub := make([]byte, rlen)
+		n, _ := buf.Read(fub)
+		s = s[n:]
+	}
+	empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
+}
+
+func TestNil(t *testing.T) {
+	var b *Buffer
+	if b.String() != "<nil>" {
+		t.Errorf("expected <nil>; got %q", b.String())
+	}
+}
+
+func TestReadFrom(t *testing.T) {
+	var buf Buffer
+	for i := 3; i < 30; i += 3 {
+		s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+		var b Buffer
+		b.ReadFrom(&buf)
+		empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
+	}
+}
+
+func TestWriteTo(t *testing.T) {
+	var buf Buffer
+	for i := 3; i < 30; i += 3 {
+		s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+		var b Buffer
+		buf.WriteTo(&b)
+		empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
+	}
+}
+
+func TestNext(t *testing.T) {
+	b := []byte{0, 1, 2, 3, 4}
+	tmp := make([]byte, 5)
+	for i := 0; i <= 5; i++ {
+		for j := i; j <= 5; j++ {
+			for k := 0; k <= 6; k++ {
+				// 0 <= i <= j <= 5; 0 <= k <= 6
+				// Check that if we start with a buffer
+				// of length j at offset i and ask for
+				// Next(k), we get the right bytes.
+				buf := NewBuffer(b[0:j])
+				n, _ := buf.Read(tmp[0:i])
+				if n != i {
+					t.Fatalf("Read %d returned %d", i, n)
+				}
+				bb := buf.Next(k)
+				want := k
+				if want > j-i {
+					want = j - i
+				}
+				if len(bb) != want {
+					t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
+				}
+				for l, v := range bb {
+					if v != byte(l+i) {
+						t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
+					}
+				}
+			}
+		}
+	}
+}
+
+var readBytesTests = []struct {
+	buffer   string
+	delim    byte
+	expected []string
+	err      error
+}{
+	{"", 0, []string{""}, io.EOF},
+	{"a\x00", 0, []string{"a\x00"}, nil},
+	{"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
+	{"hello\x01world", 1, []string{"hello\x01"}, nil},
+	{"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
+	{"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
+	{"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
+}
+
+func TestReadBytes(t *testing.T) {
+	for _, test := range readBytesTests {
+		buf := NewBuffer([]byte(test.buffer))
+		var err error
+		for _, expected := range test.expected {
+			var bytes []byte
+			bytes, err = buf.ReadBytes(test.delim)
+			if string(bytes) != expected {
+				t.Errorf("expected %q, got %q", expected, bytes)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if err != test.err {
+			t.Errorf("expected error %v, got %v", test.err, err)
+		}
+	}
+}
+
+func TestGrow(t *testing.T) {
+	x := []byte{'x'}
+	y := []byte{'y'}
+	tmp := make([]byte, 72)
+	for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
+		xBytes := bytes.Repeat(x, startLen)
+		for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
+			buf := NewBuffer(xBytes)
+			// If we read, this affects buf.off, which is good to test.
+			readBytes, _ := buf.Read(tmp)
+			buf.Grow(growLen)
+			yBytes := bytes.Repeat(y, growLen)
+			// Check no allocation occurs in write, as long as we're single-threaded.
+			var m1, m2 runtime.MemStats
+			runtime.ReadMemStats(&m1)
+			buf.Write(yBytes)
+			runtime.ReadMemStats(&m2)
+			if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
+				t.Errorf("allocation occurred during write")
+			}
+			// Check that buffer has correct data.
+			if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
+				t.Errorf("bad initial data at %d %d", startLen, growLen)
+			}
+			if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
+				t.Errorf("bad written data at %d %d", startLen, growLen)
+			}
+		}
+	}
+}
+
+// Was a bug: used to give EOF reading empty slice at EOF.
+func TestReadEmptyAtEOF(t *testing.T) {
+	b := new(Buffer)
+	slice := make([]byte, 0)
+	n, err := b.Read(slice)
+	if err != nil {
+		t.Errorf("read error: %v", err)
+	}
+	if n != 0 {
+		t.Errorf("wrong count; got %d want 0", n)
+	}
+}
+
+// Tests that we occasionally compact. Issue 5154.
+func TestBufferGrowth(t *testing.T) {
+	var b Buffer
+	buf := make([]byte, 1024)
+	b.Write(buf[0:1])
+	var cap0 int
+	for i := 0; i < 5<<10; i++ {
+		b.Write(buf)
+		b.Read(buf)
+		if i == 0 {
+			cap0 = cap(b.buf)
+		}
+	}
+	cap1 := cap(b.buf)
+	// (*Buffer).grow allows for 2x capacity slop before sliding,
+	// so set our error threshold at 3x.
+	if cap1 > cap0*3 {
+		t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
+	}
+}
+
+// From Issue 5154.
+func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
+	buf := make([]byte, 1024)
+	for i := 0; i < b.N; i++ {
+		var b Buffer
+		b.Write(buf[0:1])
+		for i := 0; i < 5<<10; i++ {
+			b.Write(buf)
+			b.Read(buf)
+		}
+	}
+}
+
+// Check that we don't compact too often. From Issue 5154.
+func BenchmarkBufferFullSmallReads(b *testing.B) {
+	buf := make([]byte, 1024)
+	for i := 0; i < b.N; i++ {
+		var b Buffer
+		b.Write(buf)
+		for b.Len()+20 < cap(b.buf) {
+			b.Write(buf[:10])
+		}
+		for i := 0; i < 5<<10; i++ {
+			b.Read(buf[:1])
+			b.Write(buf[:1])
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go
new file mode 100644
index 0000000..631c9d6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go
@@ -0,0 +1,30 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+	"hash/crc32"
+)
+
+var table = crc32.MakeTable(crc32.Castagnoli)
+
+// CRC is a CRC-32 checksum computed using Castagnoli's polynomial.
+type CRC uint32
+
+// NewCRC creates a new crc based on the given bytes.
+func NewCRC(b []byte) CRC {
+	return CRC(0).Update(b)
+}
+
+// Update updates the crc with the given bytes.
+func (c CRC) Update(b []byte) CRC {
+	return CRC(crc32.Update(uint32(c), table, b))
+}
+
+// Value returns a masked crc: the checksum rotated right by 15 bits plus a constant (LevelDB's CRC masking scheme).
+func (c CRC) Value() uint32 {
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go
new file mode 100644
index 0000000..5490366
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+	"bytes"
+	"encoding/binary"
+)
+
+// Hash returns a hash of the given data, seeded with seed (similar to murmur hash).
+func Hash(data []byte, seed uint32) uint32 {
+	// Similar to murmur hash
+	var m uint32 = 0xc6a4a793
+	var r uint32 = 24
+	h := seed ^ (uint32(len(data)) * m)
+
+	buf := bytes.NewBuffer(data)
+	for buf.Len() >= 4 { // mix in whole little-endian 32-bit words.
+		var w uint32
+		binary.Read(buf, binary.LittleEndian, &w)
+		h += w
+		h *= m
+		h ^= (h >> 16)
+	}
+
+	rest := buf.Bytes() // 0-3 trailing bytes; fallthrough accumulates them.
+	switch len(rest) {
+	default:
+		panic("not reached")
+	case 3:
+		h += uint32(rest[2]) << 16
+		fallthrough
+	case 2:
+		h += uint32(rest[1]) << 8
+		fallthrough
+	case 1:
+		h += uint32(rest[0])
+		h *= m
+		h ^= (h >> r)
+	case 0:
+	}
+
+	return h
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
new file mode 100644
index 0000000..1f7fdd4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build go1.3
+
+package util
+
+import (
+	"sync"
+)
+
+type Pool struct { // Pool wraps sync.Pool (go1.3+); see pool_legacy.go for the pre-go1.3 fallback.
+	sync.Pool
+}
+
+func NewPool(cap int) *Pool { // cap is ignored here: sync.Pool is unbounded.
+	return &Pool{}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
new file mode 100644
index 0000000..27b8d03
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build !go1.3
+
+package util
+
+type Pool struct { // channel-backed object pool for pre-go1.3 (no sync.Pool available).
+	pool chan interface{}
+}
+
+func (p *Pool) Get() interface{} { // Get returns a pooled value, or nil if the pool is empty.
+	select {
+	case x := <-p.pool:
+		return x
+	default:
+		return nil
+	}
+}
+
+func (p *Pool) Put(x interface{}) { // Put stores x, silently dropping it if the pool is full.
+	select {
+	case p.pool <- x:
+	default:
+	}
+}
+
+func NewPool(cap int) *Pool { // NewPool creates a pool holding at most cap values.
+	return &Pool{pool: make(chan interface{}, cap)}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
new file mode 100644
index 0000000..8515958
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+// Range is a key range.
+type Range struct {
+	// Start of the key range, include in the range.
+	Start []byte
+
+	// Limit of the key range, not include in the range.
+	Limit []byte
+}
+
+// BytesPrefix returns key range that satisfy the given prefix.
+// This is only applicable to the standard 'bytes comparer'.
+func BytesPrefix(prefix []byte) *Range {
+	var limit []byte // stays nil (unbounded above) if prefix is empty or all 0xff bytes.
+	for i := len(prefix) - 1; i >= 0; i-- {
+		c := prefix[i]
+		if c < 0xff {
+			limit = make([]byte, i+1)
+			copy(limit, prefix)
+			limit[i] = c + 1
+			break
+		}
+	}
+	return &Range{prefix, limit}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
new file mode 100644
index 0000000..f359768
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package util provides utilities used throughout leveldb.
+package util
+
+import (
+	"errors"
+)
+
+var (
+	ErrReleased    = errors.New("leveldb: resource already relesed") // NOTE(review): "relesed" typo is upstream goleveldb's; kept for vendoring fidelity.
+	ErrHasReleaser = errors.New("leveldb: releaser already defined")
+)
+
+// Releaser is the interface that wraps the basic Release method.
+type Releaser interface {
+	// Release releases associated resources. Release should always succeed
+	// and can be called multiple times without causing error.
+	Release()
+}
+
+// ReleaseSetter is the interface that wraps the basic SetReleaser method.
+type ReleaseSetter interface {
+	// SetReleaser associates the given releaser to the resources. The
+	// releaser will be called once corresponding resources released.
+	// Calling SetReleaser with nil will clear the releaser.
+	//
+	// This will panic if a releaser already present or corresponding
+	// resource is already released. Releaser should be cleared first
+	// before assigning a new one.
+	SetReleaser(releaser Releaser)
+}
+
+// BasicReleaser provides basic implementation of Releaser and ReleaseSetter.
+type BasicReleaser struct {
+	releaser Releaser
+	released bool
+}
+
+// Released returns whether Release method already called.
+func (r *BasicReleaser) Released() bool {
+	return r.released
+}
+
+// Release implements Releaser.Release.
+func (r *BasicReleaser) Release() {
+	if !r.released {
+		if r.releaser != nil {
+			r.releaser.Release()
+			r.releaser = nil
+		}
+		r.released = true
+	}
+}
+
+// SetReleaser implements ReleaseSetter.SetReleaser.
+func (r *BasicReleaser) SetReleaser(releaser Releaser) {
+	if r.released {
+		panic(ErrReleased)
+	}
+	if r.releaser != nil && releaser != nil {
+		panic(ErrHasReleaser)
+	}
+	r.releaser = releaser
+}
+
+type NoopReleaser struct{} // NoopReleaser is a Releaser whose Release does nothing.
+
+func (NoopReleaser) Release() {}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
new file mode 100644
index 0000000..9ce3998
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
@@ -0,0 +1,457 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"sync/atomic"
+	"unsafe"
+
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type tSet struct { // records a seek-compaction candidate table together with its level.
+	level int
+	table *tFile
+}
+
+type version struct {
+	s *session
+
+	tables []tFiles // one sorted table list per level.
+
+	// Level that should be compacted next and its compaction score.
+	// Score < 1 means compaction is not strictly needed. These fields
+	// are initialized by computeCompaction()
+	cLevel int
+	cScore float64
+
+	cSeek unsafe.Pointer // *tSet seek-compaction candidate, set via CAS in get/sampleSeek.
+
+	ref int // reference count; mutated under s.vmu (see release/releaseNB).
+	// Succeeding version.
+	next *version
+}
+
+func newVersion(s *session) *version { // allocates an empty version with one table list per level.
+	return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())}
+}
+
+func (v *version) releaseNB() { // drops one reference; caller must hold s.vmu (see release).
+	v.ref--
+	if v.ref > 0 {
+		return
+	}
+	if v.ref < 0 {
+		panic("negative version ref")
+	}
+
+	tables := make(map[uint64]bool) // table file numbers still referenced by the next version.
+	for _, tt := range v.next.tables {
+		for _, t := range tt {
+			num := t.file.Num()
+			tables[num] = true
+		}
+	}
+
+	for _, tt := range v.tables {
+		for _, t := range tt {
+			num := t.file.Num()
+			if _, ok := tables[num]; !ok {
+				v.s.tops.remove(t)
+			}
+		}
+	}
+
+	v.next.releaseNB()
+	v.next = nil
+}
+
+func (v *version) release() { // drops one reference under the session's version mutex.
+	v.s.vmu.Lock()
+	v.releaseNB()
+	v.s.vmu.Unlock()
+}
+
+func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) { // visits tables overlapping ikey, level by level; f or lf returning false stops the walk.
+	ukey := ikey.ukey()
+
+	// Walk tables level-by-level.
+	for level, tables := range v.tables {
+		if len(tables) == 0 {
+			continue
+		}
+
+		if level == 0 {
+			// Level-0 files may overlap each other. Find all files that
+			// overlap ukey.
+			for _, t := range tables {
+				if t.overlaps(v.s.icmp, ukey, ukey) {
+					if !f(level, t) {
+						return
+					}
+				}
+			}
+		} else {
+			if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) {
+				t := tables[i]
+				if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+					if !f(level, t) {
+						return
+					}
+				}
+			}
+		}
+
+		if lf != nil && !lf(level) {
+			return
+		}
+	}
+}
+
+func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { // looks up ikey; tcomp reports whether a seek compaction was triggered.
+	ukey := ikey.ukey()
+
+	var (
+		tset  *tSet
+		tseek bool
+
+		// Level-0.
+		zfound bool
+		zseq   uint64
+		zkt    kType
+		zval   []byte
+	)
+
+	err = ErrNotFound
+
+	// Since entries never hop across levels, finding key/value
+	// in a smaller level makes later levels irrelevant.
+	v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+		if !tseek {
+			if tset == nil {
+				tset = &tSet{level, t}
+			} else {
+				tseek = true
+			}
+		}
+
+		var (
+			fikey, fval []byte
+			ferr        error
+		)
+		if noValue {
+			fikey, ferr = v.s.tops.findKey(t, ikey, ro)
+		} else {
+			fikey, fval, ferr = v.s.tops.find(t, ikey, ro)
+		}
+		switch ferr {
+		case nil:
+		case ErrNotFound:
+			return true
+		default:
+			err = ferr
+			return false
+		}
+
+		if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil {
+			if v.s.icmp.uCompare(ukey, fukey) == 0 {
+				if level == 0 {
+					if fseq >= zseq { // level-0 tables overlap; keep the entry with the highest sequence.
+						zfound = true
+						zseq = fseq
+						zkt = fkt
+						zval = fval
+					}
+				} else {
+					switch fkt {
+					case ktVal:
+						value = fval
+						err = nil
+					case ktDel:
+					default:
+						panic("leveldb: invalid iKey type")
+					}
+					return false
+				}
+			}
+		} else {
+			err = fkerr
+			return false
+		}
+
+		return true
+	}, func(level int) bool {
+		if zfound {
+			switch zkt {
+			case ktVal:
+				value = zval
+				err = nil
+			case ktDel:
+			default:
+				panic("leveldb: invalid iKey type")
+			}
+			return false
+		}
+
+		return true
+	})
+
+	if tseek && tset.table.consumeSeek() <= 0 { // seek budget exhausted; nominate table for compaction.
+		tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+	}
+
+	return
+}
+
+func (v *version) sampleSeek(ikey iKey) (tcomp bool) { // charges a seek to the first overlapping table; may nominate it for compaction.
+	var tset *tSet
+
+	v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+		if tset == nil {
+			tset = &tSet{level, t}
+			return true
+		} else {
+			if tset.table.consumeSeek() <= 0 {
+				tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+			}
+			return false
+		}
+	}, nil)
+
+	return
+}
+
+func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { // builds one iterator per level-0 table plus one indexed iterator per deeper level.
+	// Merge all level zero files together since they may overlap
+	for _, t := range v.tables[0] {
+		it := v.s.tops.newIterator(t, slice, ro)
+		its = append(its, it)
+	}
+
+	strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader)
+	for _, tables := range v.tables[1:] {
+		if len(tables) == 0 {
+			continue
+		}
+
+		it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)
+		its = append(its, it)
+	}
+
+	return
+}
+
+func (v *version) newStaging() *versionStaging { // prepares a staging area for building the next version from this one.
+	return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())}
+}
+
+// Spawn a new version based on this version.
+func (v *version) spawn(r *sessionRecord) *version {
+	staging := v.newStaging()
+	staging.commit(r)
+	return staging.finish()
+}
+
+func (v *version) fillRecord(r *sessionRecord) { // records every table of every level into r.
+	for level, ts := range v.tables {
+		for _, t := range ts {
+			r.addTableFile(level, t)
+		}
+	}
+}
+
+func (v *version) tLen(level int) int { // number of tables at the given level.
+	return len(v.tables[level])
+}
+
+func (v *version) offsetOf(ikey iKey) (n uint64, err error) { // approximate byte offset of ikey within the whole version.
+	for level, tables := range v.tables {
+		for _, t := range tables {
+			if v.s.icmp.Compare(t.imax, ikey) <= 0 {
+				// Entire file is before "ikey", so just add the file size
+				n += t.size
+			} else if v.s.icmp.Compare(t.imin, ikey) > 0 {
+				// Entire file is after "ikey", so ignore
+				if level > 0 {
+					// Files other than level 0 are sorted by meta->min, so
+					// no further files in this level will contain data for
+					// "ikey".
+					break
+				}
+			} else {
+				// "ikey" falls in the range for this table. Add the
+				// approximate offset of "ikey" within the table.
+				var nn uint64
+				nn, err = v.s.tops.offsetOf(t, ikey)
+				if err != nil {
+					return 0, err
+				}
+				n += nn
+			}
+		}
+	}
+
+	return
+}
+
+func (v *version) pickMemdbLevel(umin, umax []byte) (level int) {
+	if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
+		var overlaps tFiles
+		maxLevel := v.s.o.GetMaxMemCompationLevel()
+		for ; level < maxLevel; level++ {
+			if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) {
+				break
+			}
+			overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false)
+			if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+func (v *version) computeCompaction() {
+	// Precomputed best level for next compaction
+	var bestLevel int = -1
+	var bestScore float64 = -1
+
+	for level, tables := range v.tables {
+		var score float64
+		if level == 0 {
+			// We treat level-0 specially by bounding the number of files
+			// instead of number of bytes for two reasons:
+			//
+			// (1) With larger write-buffer sizes, it is nice not to do too
+			// many level-0 compactions.
+			//
+			// (2) The files in level-0 are merged on every read and
+			// therefore we wish to avoid too many files when the individual
+			// file size is small (perhaps because of a small write-buffer
+			// setting, or very high compression ratios, or lots of
+			// overwrites/deletions).
+			score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger())
+		} else {
+			score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level))
+		}
+
+		if score > bestScore {
+			bestLevel = level
+			bestScore = score
+		}
+	}
+
+	v.cLevel = bestLevel
+	v.cScore = bestScore
+}
+
+func (v *version) needCompaction() bool {
+	return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil
+}
+
+type tablesScratch struct {
+	added   map[uint64]atRecord
+	deleted map[uint64]struct{}
+}
+
+type versionStaging struct {
+	base   *version
+	tables []tablesScratch
+}
+
+func (p *versionStaging) commit(r *sessionRecord) {
+	// Deleted tables.
+	for _, r := range r.deletedTables {
+		tm := &(p.tables[r.level])
+
+		if len(p.base.tables[r.level]) > 0 {
+			if tm.deleted == nil {
+				tm.deleted = make(map[uint64]struct{})
+			}
+			tm.deleted[r.num] = struct{}{}
+		}
+
+		if tm.added != nil {
+			delete(tm.added, r.num)
+		}
+	}
+
+	// New tables.
+	for _, r := range r.addedTables {
+		tm := &(p.tables[r.level])
+
+		if tm.added == nil {
+			tm.added = make(map[uint64]atRecord)
+		}
+		tm.added[r.num] = r
+
+		if tm.deleted != nil {
+			delete(tm.deleted, r.num)
+		}
+	}
+}
+
+func (p *versionStaging) finish() *version {
+	// Build new version.
+	nv := newVersion(p.base.s)
+	for level, tm := range p.tables {
+		btables := p.base.tables[level]
+
+		n := len(btables) + len(tm.added) - len(tm.deleted)
+		if n < 0 {
+			n = 0
+		}
+		nt := make(tFiles, 0, n)
+
+		// Base tables.
+		for _, t := range btables {
+			if _, ok := tm.deleted[t.file.Num()]; ok {
+				continue
+			}
+			if _, ok := tm.added[t.file.Num()]; ok {
+				continue
+			}
+			nt = append(nt, t)
+		}
+
+		// New tables.
+		for _, r := range tm.added {
+			nt = append(nt, p.base.s.tableFileFromRecord(r))
+		}
+
+		// Sort tables.
+		if level == 0 {
+			nt.sortByNum()
+		} else {
+			nt.sortByKey(p.base.s.icmp)
+		}
+		nv.tables[level] = nt
+	}
+
+	// Compute compaction score for new version.
+	nv.computeCompaction()
+
+	return nv
+}
+
+type versionReleaser struct {
+	v    *version
+	once bool
+}
+
+func (vr *versionReleaser) Release() {
+	v := vr.v
+	v.s.vmu.Lock()
+	if !vr.once {
+		v.releaseNB()
+		vr.once = true
+	}
+	v.s.vmu.Unlock()
+}
diff --git a/README.rst b/README.md
similarity index 94%
rename from README.rst
rename to README.md
index 78f8a6d..f998d80 100644
--- a/README.rst
+++ b/README.md
@@ -1,6 +1,5 @@
-+++++++++++++++
 A/I Audit Suite
-+++++++++++++++
+===============
 
 This software implements a distributed collection framework for
 *user audit* logs, which consist of high-level management events
@@ -14,10 +13,9 @@ An interface to query the audit database is also provided, with a very
 simple *key = value* syntax.
 
 
-Overview
-========
+# Overview
 
-An audit *message* is a schema-less JSON dictionary describing a
+A single audit message is a schema-less JSON dictionary describing a
 specific event. The meanings of its attributes are determined by
 convention, but a message must include at least the *user*, *message*
 and *stamp* attributes.
@@ -45,8 +43,7 @@ as follows:
   database.
 
 
-Authentication
-==============
+# Authentication
 
 The suite is meant to be deployed along an X509-based authentication
 infrastructure where a trusted Certification Authority assigns a
diff --git a/debian/control b/debian/control
index ec20109..69180e7 100644
--- a/debian/control
+++ b/debian/control
@@ -2,7 +2,7 @@ Source: audit
 Section: net
 Priority: extra
 Maintainer: ale <ale@incal.net>
-Build-Depends: debhelper (>= 8.0.0), libleveldb-dev, git, rsync, wget
+Build-Depends: debhelper (>= 8.0.0), git, rsync, wget
 Standards-Version: 3.9.4
 Homepage: https://git.autistici.org/ai/audit
 
@@ -14,7 +14,7 @@ Description: Local audit spool daemon.
 
 Package: ai-auditd
 Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, lsb-base, libleveldb1
+Depends: ${shlibs:Depends}, ${misc:Depends}, lsb-base
 Description: Central audit server.
  Remote part of the A/I audit suite.
 
diff --git a/debian/rules b/debian/rules
index de8fd35..9919791 100755
--- a/debian/rules
+++ b/debian/rules
@@ -20,8 +20,6 @@ GOROOT = $(CURDIR)/debian/build-go
 override_dh_install:
 	# Build the sources using a locally downloaded Go version.
 	# This is crazy, clearly, but it's a temporary workaround for wheezy.
-	# Also, we redefine 'leveldb_free' to 'free' because the LevelDB
-	# version in wheezy does not have that symbol.
 	install -m 755 -o root -g root -d $(DEBDIR)/auditc/usr/bin
 	install -m 755 -o root -g root -d $(DEBDIR)/ai-auditd/usr/sbin
 	install -m 755 -o root -g root -d $(DEBDIR)/localauditd/usr/sbin
@@ -31,7 +29,7 @@ override_dh_install:
 	 rsync -a --exclude=debian --exclude=.git $(CURDIR)/ $(BUILDDIR)/src/$(GOPKG)/ ; \
 	 cd $(BUILDDIR)/src && \
 	 $(GOROOT)/bin/go get -d -v ./$(GOPKG)/... && \
-	 CGO_CFLAGS=-Dleveldb_free=free $(GOROOT)/bin/go install -v ./$(GOPKG)/... && \
+	 $(GOROOT)/bin/go install -v ./$(GOPKG)/... && \
 	 install -m 755 -o root -g root $(BUILDDIR)/bin/auditd $(DEBDIR)/ai-auditd/usr/sbin/auditd && \
 	 install -m 755 -o root -g root $(BUILDDIR)/bin/auditd-dbtool $(DEBDIR)/ai-auditd/usr/sbin/auditd-dbtool && \
 	 install -m 755 -o root -g root $(BUILDDIR)/bin/localauditd $(DEBDIR)/localauditd/usr/sbin/localauditd && \
diff --git a/server/db.go b/server/db.go
index ad80a93..6a8da7d 100644
--- a/server/db.go
+++ b/server/db.go
@@ -9,10 +9,10 @@ import (
 	"strings"
 
 	"git.autistici.org/ai/audit"
-
-	// Use the backwards-compatible version of Levigo
-	// on Debian wheezy systems (due to their old LevelDB).
-	"github.com/jmhodges/levigo_leveldb_1.4"
+	"git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb"
+	ldbfilter "git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
+	ldbopt "git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	ldbutil "git.autistici.org/ai/audit/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
 )
 
 // DB is a very simple indexed database implementation using LevelDB.
@@ -35,9 +35,7 @@ import (
 // null byte (which is used internally as the primary key separator).
 //
 type DB struct {
-	leveldb      *levigo.DB
-	cache        *levigo.Cache
-	filter       *levigo.FilterPolicy
+	leveldb      *leveldb.DB
 	excludedKeys map[string]struct{}
 }
 
@@ -62,28 +60,24 @@ func NewDB(path string, dbopts *DBOptions) *DB {
 	}
 
 	// Set LevelDB options and create the database.
-	opts := levigo.NewOptions()
-	cache := levigo.NewLRUCache(dbopts.LRUCacheSize)
-	opts.SetCache(cache)
-	opts.SetCreateIfMissing(true)
-	filter := levigo.NewBloomFilter(dbopts.BloomFilterSizeExp)
-	opts.SetFilterPolicy(filter)
-
-	leveldb, err := levigo.Open(path, opts)
+	opts := &ldbopt.Options{
+		BlockCacheCapacity: dbopts.LRUCacheSize,
+		Filter:             ldbfilter.NewBloomFilter(dbopts.BloomFilterSizeExp),
+	}
+
+	ldb, err := leveldb.OpenFile(path, opts)
 	if err != nil {
-		log.Fatalf("Error creating database: %s", err)
+		log.Fatalf("Error opening database: %s", err)
 	}
 
 	db := &DB{
-		leveldb:      leveldb,
-		cache:        cache,
-		filter:       filter,
+		leveldb:      ldb,
 		excludedKeys: make(map[string]struct{}),
 	}
 
 	// Exclude some keys by default.
 	db.excludedKeys["message"] = struct{}{}
-	db.excludedKeys["stamp"] = struct{}{}     // old field for timestamp
+	db.excludedKeys["stamp"] = struct{}{} // old field for timestamp
 	db.excludedKeys["timestamp"] = struct{}{}
 	for _, key := range dbopts.ExcludedKeys {
 		db.excludedKeys[key] = struct{}{}
@@ -94,8 +88,6 @@ func NewDB(path string, dbopts *DBOptions) *DB {
 // Close the database and release associated resources.
 func (db *DB) Close() {
 	db.leveldb.Close()
-	db.cache.Close()
-	db.filter.Close()
 }
 
 func (db *DB) isExcluded(key string) bool {
@@ -122,7 +114,7 @@ func makePrefixKeyRange(prefix []byte) ([]byte, []byte) {
 	return startKey, endKey
 }
 
-func (db *DB) writeToBatch(msg audit.Message, wb *levigo.WriteBatch) {
+func (db *DB) writeToBatch(msg audit.Message, wb *leveldb.Batch) {
 	// Generate a unique ID for the message.
 	msgid := audit.NewUniqueId(msg.Stamp())
 	wb.Put(makeKey("messages", msgid), msg.ToJSON())
@@ -144,30 +136,26 @@ func (db *DB) writeToBatch(msg audit.Message, wb *levigo.WriteBatch) {
 
 // Write a Message to the database.
 func (db *DB) Write(msg audit.Message) error {
-	wb := levigo.NewWriteBatch()
-	defer wb.Close()
-
+	wb := new(leveldb.Batch)
 	db.writeToBatch(msg, wb)
-
-	wo := levigo.NewWriteOptions()
-	defer wo.Close()
-	return db.leveldb.Write(wo, wb)
+	return db.leveldb.Write(wb, nil)
 }
 
 // Dump contents to a writer.
 func (db *DB) Dump(w io.Writer) error {
-	ro := levigo.NewReadOptions()
-	ro.SetFillCache(false)
-	defer ro.Close()
-
-	i := db.leveldb.NewIterator(ro)
-	defer i.Close()
-
 	startKey, endKey := makePrefixKeyRange(makeKey("messages"))
-	for i.Seek(startKey); i.Valid() && bytes.Compare(i.Key(), endKey) < 0; i.Next() {
+	i := db.leveldb.NewIterator(
+		&ldbutil.Range{
+			Start: startKey,
+			Limit: endKey,
+		},
+		&ldbopt.ReadOptions{DontFillCache: true},
+	)
+	for i.Next() {
 		fmt.Fprintf(w, "%s\n", i.Value())
 	}
-	if err := i.GetError(); err != nil {
+	i.Release()
+	if err := i.Error(); err != nil {
 		return err
 	}
 	return nil
@@ -175,8 +163,7 @@ func (db *DB) Dump(w io.Writer) error {
 
 // Restore contents from a dump.
 func (db *DB) Restore(r io.Reader) error {
-	wb := levigo.NewWriteBatch()
-	defer wb.Close()
+	wb := new(leveldb.Batch)
 
 	errs := 0
 	scanner := bufio.NewScanner(r)
@@ -195,9 +182,7 @@ func (db *DB) Restore(r io.Reader) error {
 		return fmt.Errorf("failed to restore %d records", errs)
 	}
 
-	wo := levigo.NewWriteOptions()
-	defer wo.Close()
-	return db.leveldb.Write(wo, wb)
+	return db.leveldb.Write(wb, nil)
 }
 
 type keySet map[string]struct{}
@@ -231,21 +216,21 @@ func (r keySet) Intersect(other keySet) keySet {
 // Query a key/value index. This will load *all* matching keys
 // in memory, which might take quite a lot of space.
 func (db *DB) queryIndex(key, value string) (keySet, error) {
-	ro := levigo.NewReadOptions()
-	ro.SetFillCache(false)
-	defer ro.Close()
-
-	i := db.leveldb.NewIterator(ro)
-	defer i.Close()
-
 	startKey, endKey := makePrefixKeyRange(makeKey("index", []byte(key), []byte(value)))
-	i.Seek(startKey)
+	i := db.leveldb.NewIterator(
+		&ldbutil.Range{
+			Start: startKey,
+			Limit: endKey,
+		},
+		&ldbopt.ReadOptions{DontFillCache: true},
+	)
 
 	keys := make(keySet)
-	for ; i.Valid() && bytes.Compare(i.Key(), endKey) < 0; i.Next() {
+	for i.Next() {
 		keys.Add(i.Value())
 	}
-	if err := i.GetError(); err != nil {
+	i.Release()
+	if err := i.Error(); err != nil {
 		return nil, err
 	}
 	return keys, nil
@@ -253,13 +238,10 @@ func (db *DB) queryIndex(key, value string) (keySet, error) {
 
 // Retrieve a bunch of messages from the db.
 func (db *DB) getMessages(set keySet) ([]audit.Message, error) {
-	ro := levigo.NewReadOptions()
-	defer ro.Close()
-
 	var lastErr error
 	result := make([]audit.Message, 0, len(set))
 	for key, _ := range set {
-		data, err := db.leveldb.Get(ro, []byte(key))
+		data, err := db.leveldb.Get([]byte(key), nil)
 		if err != nil {
 			lastErr = err
 			continue
-- 
GitLab