"encoding/binary"
"encoding/json"
"errors"
"math/rand"
"os"
"path/filepath"
"strconv"
"time"
"github.com/blevesearch/bleve"
"github.com/jmhodges/levigo"
)
var (
BookBucket = []byte("ebook")
FileBucket = []byte("file")
BookFileBucket = []byte("ebook_file")
keySeparator = byte('/')
)
type BookId uint64
func (id BookId) String() string {
return strconv.FormatUint(uint64(id), 10)
}
func (id BookId) Key() []byte {
var buf bytes.Buffer
binary.Write(&buf, binary.LittleEndian, id)
return buf.Bytes()
}
func init() {
// Seed the RNG to a random value.
var seed int64
binary.Read(cryptorand.Reader, binary.LittleEndian, &seed)
rand.Seed(seed)
}
func NewID() BookId {
return BookId(rand.Int63())
}
func ParseID(s string) BookId {
id, _ := strconv.ParseUint(s, 10, 64)
return BookId(id)
}
func metadataDocumentMapping() *bleve.DocumentMapping {
md := bleve.NewDocumentMapping()
titleFieldMapping := bleve.NewTextFieldMapping()
titleFieldMapping.Analyzer = "en"
titleFieldMapping.Store = false
md.AddFieldMappingsAt("Title", titleFieldMapping)
authorFieldMapping := bleve.NewTextFieldMapping()
authorFieldMapping.Store = false
md.AddFieldMappingsAt("Creator", authorFieldMapping)
nostoreFieldMapping := bleve.NewTextFieldMapping()
nostoreFieldMapping.Store = false
nostoreFieldMapping.IncludeInAll = false
md.AddFieldMappingsAt("Description", nostoreFieldMapping)
md.AddFieldMappingsAt("ISBN", nostoreFieldMapping)
for _, ignore := range []string{"Sources", "Date", "Publisher", "Format", "Keywords", "Language"} {
md.AddSubDocumentMapping(ignore, bleve.NewDocumentDisabledMapping())
}
return md
}
func defaultIndexMapping() *bleve.IndexMapping {
i := bleve.NewIndexMapping()
i.AddDocumentMapping("ebook", metadataDocumentMapping())
i.DefaultAnalyzer = "en"
return i
}
type Book struct {
Id BookId
CoverPath string
Metadata *Metadata
}
func (b *Book) Type() string {
return "ebook"
}
type File struct {
Path string
FileType string
Mtime time.Time
Size int64
Error bool
Id BookId
}
func (f *File) HasChanged(info os.FileInfo) bool {
return !info.ModTime().Equal(f.Mtime) || info.Size() != f.Size
}
type Database struct {
leveldb *levigo.DB
leveldbCache *levigo.Cache
leveldbFilter *levigo.FilterPolicy
index bleve.Index
}
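// NewDb opens the database rooted at path, creating the directory, the
// LevelDB store and the full-text index if they do not already exist.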
func NewDb(path string) (*Database, error) {
// Make sure that path exists.
if _, err := os.Stat(path); err != nil {
if err := os.Mkdir(path, 0700); err != nil {
return nil, err
}
}
// Initialize our database and the index.
d := &Database{}
if err := d.setupLevelDb(filepath.Join(path, "db")); err != nil {
return nil, err
}
if err := d.setupIndex(filepath.Join(path, "index")); err != nil {
return nil, err
}
return d, nil
}
func (db *Database) setupLevelDb(path string) error {
opts := levigo.NewOptions()
db.leveldbCache = levigo.NewLRUCache(2 << 28)
opts.SetCache(db.leveldbCache)
db.leveldbFilter = levigo.NewBloomFilter(10)
opts.SetFilterPolicy(db.leveldbFilter)
opts.SetCreateIfMissing(true)
leveldb, err := levigo.Open(path, opts)
if err != nil {
return err
}
db.leveldb = leveldb
return nil
}
func (db *Database) setupIndex(path string) error {
var err error
if _, serr := os.Stat(path); serr == nil {
db.index, err = bleve.Open(path)
} else {
db.index, err = bleve.New(path, defaultIndexMapping())
}
if err != nil {
return err
}
return nil
}
func (db *Database) Close() {
db.index.Close()
db.leveldb.Close()
db.leveldbCache.Close()
db.leveldbFilter.Close()
}
func (db *Database) GetBook(bookid BookId) (*Book, error) {
var b Book
if err := db.Get(BookBucket, bookid.Key(), &b); err != nil {
return nil, err
}
return &b, nil
}
func (db *Database) GetFile(path string) (*File, error) {
var f File
if err := db.Get(FileBucket, []byte(path), &f); err != nil {
return nil, err
}
return &f, nil
}
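// Illustrative sketch (not part of the original listing): a caller scanning
// the filesystem could combine GetFile and File.HasChanged to decide whether
// a file needs (re-)importing. The helper name needsImport is hypothetical.
func needsImport(db *Database, path string, info os.FileInfo) bool {
	f, err := db.GetFile(path)
	if err != nil {
		// Unknown file: it was never imported.
		return true
	}
	return f.HasChanged(info)
}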
func (db *Database) GetBookFiles(bookid BookId) ([]*File, error) {
ro := levigo.NewReadOptions()
defer ro.Close()
it := db.leveldb.NewIterator(ro)
defer it.Close()
start, end := keyRange(bktToKey(BookFileBucket, bookid.Key()))
var out []*File
for it.Seek(start); it.Valid() && bytes.Compare(it.Key(), end) < 0; it.Next() {
var fpath string
if json.Unmarshal(it.Value(), &fpath) == nil {
if file, err := db.GetFile(fpath); err == nil {
out = append(out, file)
}
}
}
return out, nil
}
func (db *Database) Get(bucket, key []byte, obj interface{}) error {
ro := levigo.NewReadOptions()
defer ro.Close()
data, err := db.leveldb.Get(ro, bktToKey(bucket, key))
if err != nil {
return err
}
return json.Unmarshal(data, obj)
}
func (db *Database) PutBook(b *Book) error {
if err := db.Put(BookBucket, b.Id.Key(), b); err != nil {
return err
}
return db.index.Index(b.Id.String(), b.Metadata)
}
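// fileBookKey builds the composite key used in the ebook_file bucket:
// the book ID followed by the file path, joined by the key separator.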
func fileBookKey(path string, bookid BookId) []byte {
return bytes.Join([][]byte{bookid.Key(), []byte(path)}, []byte{keySeparator})
}
func (db *Database) PutFile(f *File) error {
if err := db.Put(FileBucket, []byte(f.Path), f); err != nil {
return err
}
if !f.Error {
return db.Put(BookFileBucket, fileBookKey(f.Path, f.Id), f.Path)
}
return nil
}
func (db *Database) Put(bucket, key []byte, obj interface{}) error {
data, err := json.Marshal(obj)
if err != nil {
return err
}
wo := levigo.NewWriteOptions()
defer wo.Close()
return db.leveldb.Put(wo, bktToKey(bucket, key), data)
}
func (db *Database) DeleteBook(bookid BookId) error {
db.Delete(BookBucket, bookid.Key())
return db.index.Delete(bookid.String())
}
func (db *Database) DeleteFile(path string) error {
f, err := db.GetFile(path)
if err != nil {
return nil
}
db.Delete(FileBucket, []byte(path))
db.Delete(BookFileBucket, fileBookKey(path, f.Id))
// Delete the book if there are no files left.
if files, err := db.GetBookFiles(f.Id); err == nil && len(files) == 0 {
db.DeleteBook(f.Id)
}
return nil
}
func (db *Database) Delete(bucket, key []byte) error {
wo := levigo.NewWriteOptions()
defer wo.Close()
return db.leveldb.Delete(wo, bktToKey(bucket, key))
}
type DatabaseIterator struct {
db *levigo.DB
snap *levigo.Snapshot
iter *levigo.Iterator
ro *levigo.ReadOptions
end []byte
}
func (i *DatabaseIterator) Close() {
i.iter.Close()
i.ro.Close()
i.db.ReleaseSnapshot(i.snap)
}
func (i *DatabaseIterator) Next() {
i.iter.Next()
}
func (i *DatabaseIterator) Valid() bool {
return i.iter.Valid() && (bytes.Compare(i.iter.Key(), i.end) < 0)
}
func (i *DatabaseIterator) Id() BookId {
return keyToId(i.iter.Key())
}
func (i *DatabaseIterator) Value(obj interface{}) error {
return json.Unmarshal(i.iter.Value(), obj)
}
// Scan an entire bucket.
func (db *Database) Scan(bucket []byte) *DatabaseIterator {
snap := db.leveldb.NewSnapshot()
ro := levigo.NewReadOptions()
ro.SetFillCache(false)
ro.SetSnapshot(snap)
it := db.leveldb.NewIterator(ro)
start, end := keyRange(bucket)
it.Seek(start)
return &DatabaseIterator{
db: db.leveldb,
snap: snap,
ro: ro,
iter: it,
end: end,
}
}
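// Illustrative sketch (not part of the original listing): walk every Book
// stored in BookBucket using the snapshot-backed iterator returned by Scan.
// The helper name allBooks is hypothetical.
func allBooks(db *Database) []*Book {
	var books []*Book
	it := db.Scan(BookBucket)
	defer it.Close()
	for ; it.Valid(); it.Next() {
		var b Book
		if err := it.Value(&b); err != nil {
			// Skip entries that fail to decode.
			continue
		}
		books = append(books, &b)
	}
	return books
}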
type SearchResult struct {
Results []*Book
NumResults int
}
func (db *Database) doSearch(query bleve.Query, offset, limit int) (*SearchResult, error) {
req := bleve.NewSearchRequestOptions(query, limit, offset, false)
result, err := db.index.Search(req)
if err != nil {
return nil, err
}
sr := SearchResult{NumResults: int(result.Total)}
for _, r := range result.Hits {
if book, err := db.GetBook(ParseID(r.ID)); err == nil {
sr.Results = append(sr.Results, book)
}
}
return &sr, nil
}
// Search the database with a query string.
func (db *Database) Search(queryStr string, offset, limit int) (*SearchResult, error) {
return db.doSearch(bleve.NewQueryStringQuery(queryStr), offset, limit)
}
// Autocomplete runs a fuzzy search for a term.
func (db *Database) Autocomplete(term string) (*SearchResult, error) {
return db.doSearch(bleve.NewFuzzyQuery(term), 0, 20)
}
// Find a book matching the given metadata, if possible.
func (db *Database) Find(m *Metadata) (*Book, error) {
var query bleve.Query
if len(m.ISBN) > 0 {
var queries []bleve.Query
for _, isbn := range m.ISBN {
q := bleve.NewTermQuery(isbn)
q.SetField("ISBN")
queries = append(queries, q)
}
query = bleve.NewDisjunctionQuery(queries)
} else {
var queries []bleve.Query
if m.Title != "" {
q := bleve.NewMatchQuery(m.Title)
q.SetField("Title")
queries = append(queries, q)
}
if len(m.Creator) > 0 {
for _, a := range m.Creator {
q := bleve.NewMatchQuery(a)
q.SetField("Creator")
queries = append(queries, q)
}
}
if len(queries) == 0 {
return nil, errors.New("insufficient metadata for query")
}
query = bleve.NewConjunctionQuery(queries)
}
search := bleve.NewSearchRequest(query)
result, err := db.index.Search(search)
if err != nil {
return nil, err
}
for _, r := range result.Hits {
book, err := db.GetBook(ParseID(r.ID))
if err != nil {
continue
}
if book.Metadata.Equals(m) {
return book, nil
}
}
return nil, errors.New("no matches found")
}
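// bktToKey prefixes a key with its bucket name and the key separator.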
func bktToKey(bucket, key []byte) []byte {
return bytes.Join([][]byte{bucket, key}, []byte{keySeparator})
}
// Input is a full key (including bucket).
func keyToId(key []byte) BookId {
n := bytes.Index(key, []byte{keySeparator})
if n < 0 {
return 0
}
var id uint64
binary.Read(bytes.NewReader(key[n+1:]), binary.LittleEndian, &id)
return BookId(id)
}
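// keyRange returns the start (inclusive) and end (exclusive) keys that
// bound every entry stored under the given prefix.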
func keyRange(prefix []byte) ([]byte, []byte) {
start := make([]byte, len(prefix)+1)
end := make([]byte, len(prefix)+1)
copy(start, prefix)
copy(end, prefix)
start[len(prefix)] = keySeparator
end[len(prefix)] = keySeparator + 1
return start, end
}
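// Illustrative end-to-end sketch (not part of the original listing): open a
// database, store a book and query it back. The directory, the query string
// and the helper name are hypothetical, and a Metadata value is assumed to
// have been filled in elsewhere.
func exampleUsage(md *Metadata) error {
	db, err := NewDb("/tmp/bookdb")
	if err != nil {
		return err
	}
	defer db.Close()

	// Store the book; PutBook also indexes its metadata.
	book := &Book{Id: NewID(), Metadata: md}
	if err := db.PutBook(book); err != nil {
		return err
	}

	// Run a full-text query over the indexed metadata.
	res, err := db.Search("dickens", 0, 10)
	if err != nil {
		return err
	}
	if res.NumResults == 0 {
		return errors.New("no results")
	}
	return nil
}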