From 46f484cc52321849973266aa9a8ce8cb3ff3981f Mon Sep 17 00:00:00 2001
From: renovate
Date: Tue, 24 Jan 2023 21:28:55 +0000
Subject: [PATCH] Update module github.com/blevesearch/bleve/v2 to v2.3.6

---
 go.mod | 8 +-
 go.sum | 49 +
 .../RoaringBitmap/roaring/.gitignore | 1 -
 .../RoaringBitmap/roaring/.travis.yml | 2 +-
 .../github.com/RoaringBitmap/roaring/Makefile | 6 +-
 .../RoaringBitmap/roaring/README.md | 10 +-
 .../RoaringBitmap/roaring/arraycontainer.go | 35 +-
 .../roaring/arraycontainer_gen.go | 134 -
 .../RoaringBitmap/roaring/bitmapcontainer.go | 37 +-
 .../roaring/bitmapcontainer_gen.go | 415 --
 .../RoaringBitmap/roaring/byte_input.go | 161 -
 .../RoaringBitmap/roaring/fastaggregation.go | 33 +-
 .../github.com/RoaringBitmap/roaring/go.mod | 13 +-
 .../github.com/RoaringBitmap/roaring/go.sum | 48 +-
 .../roaring/internal/byte_input.go | 166 +
 .../RoaringBitmap/roaring/internal/pools.go | 21 +
 .../RoaringBitmap/roaring/parallel.go | 9 +-
 .../RoaringBitmap/roaring/roaring.go | 119 +-
 .../RoaringBitmap/roaring/roaringarray.go | 158 +-
 .../RoaringBitmap/roaring/roaringarray_gen.go | 529 ---
 .../RoaringBitmap/roaring/runcontainer.go | 569 +--
 .../RoaringBitmap/roaring/runcontainer_gen.go | 1104 -----
 .../RoaringBitmap/roaring/serialization.go | 15 -
 .../roaring/serialization_littleendian.go | 283 ++
 .../github.com/RoaringBitmap/roaring/smat.go | 2 +-
 .../bitset/.gitignore | 0
 .../bitset/.travis.yml | 0
 .../{willf => bits-and-blooms}/bitset/LICENSE | 0
 .../bitset/README.md | 11 +-
 .../bitset/azure-pipelines.yml | 0
 .../bitset/bitset.go | 23 +-
 .../github.com/bits-and-blooms/bitset/go.mod | 3 +
 .../{willf => bits-and-blooms}/bitset/go.sum | 0
 .../bitset/popcnt.go | 0
 .../bitset/popcnt_19.go | 0
 .../bitset/popcnt_amd64.go | 0
 .../bitset/popcnt_amd64.s | 0
 .../bitset/popcnt_generic.go | 0
 .../bitset/trailing_zeros_18.go | 0
 .../bitset/trailing_zeros_19.go | 0
 .../blevesearch/bleve/v2/.gitignore | 1 +
 .../github.com/blevesearch/bleve/v2/README.md | 62 +-
 .../blevesearch/bleve/v2/SECURITY.md | 15 +
 .../v2/analysis/analyzer/custom/custom.go | 4 +-
 .../v2/analysis/analyzer/keyword/keyword.go | 4 +-
 .../v2/analysis/analyzer/simple/simple.go | 4 +-
 .../v2/analysis/analyzer/standard/standard.go | 4 +-
 .../bleve/v2/analysis/lang/en/analyzer_en.go | 4 +-
 .../blevesearch/bleve/v2/analysis/type.go | 9 +-
 .../blevesearch/bleve/v2/config_app.go | 1 +
 .../blevesearch/bleve/v2/config_disk.go | 1 +
 .../blevesearch/bleve/v2/document/document.go | 11 +-
 .../bleve/v2/document/field_geopoint.go | 53 +-
 .../bleve/v2/document/field_geoshape.go | 235 +
 .../blevesearch/bleve/v2/document/field_ip.go | 132 +
 .../bleve/v2/document/field_text.go | 8 +-
 .../blevesearch/bleve/v2/geo/README.md | 268 ++
 .../bleve/v2/geo/geo_s2plugin_impl.go | 450 ++
 .../blevesearch/bleve/v2/geo/parse.go | 277 ++
 vendor/github.com/blevesearch/bleve/v2/go.mod | 49 +-
 vendor/github.com/blevesearch/bleve/v2/go.sum | 133 +-
 .../github.com/blevesearch/bleve/v2/index.go | 13 +
 .../bleve/v2/index/scorch/builder.go | 4 +-
 .../bleve/v2/index/scorch/empty.go | 8 +
 .../bleve/v2/index/scorch/introducer.go | 40 +-
 .../bleve/v2/index/scorch/merge.go | 57 +-
 .../bleve/v2/index/scorch/persister.go | 80 +-
 .../bleve/v2/index/scorch/scorch.go | 65 +-
 .../bleve/v2/index/scorch/segment_plugin.go | 10 +
 .../bleve/v2/index/scorch/snapshot_index.go | 341 +-
 .../v2/index/scorch/snapshot_index_dict.go | 11 +-
 .../v2/index/scorch/snapshot_index_tfr.go | 28 +-
 .../bleve/v2/index/scorch/snapshot_segment.go | 7 +-
 .../bleve/v2/index/scorch/stats.go | 3 +
 .../bleve/v2/index/scorch/unadorned.go | 25 +-
 .../bleve/v2/index/upsidedown/field_dict.go | 4 +
 .../bleve/v2/index/upsidedown/index_reader.go | 5 +-
 .../index/upsidedown/store/goleveldb/batch.go | 2 +-
 .../upsidedown/store/goleveldb/config.go | 4 +-
 .../upsidedown/store/goleveldb/iterator.go | 2 +-
 .../upsidedown/store/goleveldb/reader.go | 4 +-
 .../index/upsidedown/store/goleveldb/store.go | 6 +-
 .../upsidedown/store/goleveldb/writer.go | 2 +-
 .../index/upsidedown/store/gtreap/iterator.go | 2 +-
 .../index/upsidedown/store/gtreap/reader.go | 2 +-
 .../v2/index/upsidedown/store/gtreap/store.go | 2 +-
 .../bleve/v2/index/upsidedown/upsidedown.go | 2 +
 .../blevesearch/bleve/v2/index_alias_impl.go | 4 +
 .../blevesearch/bleve/v2/index_impl.go | 91 +-
 .../blevesearch/bleve/v2/index_meta.go | 19 +
 .../blevesearch/bleve/v2/mapping.go | 14 +
 .../blevesearch/bleve/v2/mapping/document.go | 21 +-
 .../blevesearch/bleve/v2/mapping/field.go | 104 +-
 .../blevesearch/bleve/v2/mapping/index.go | 2 +-
 .../blevesearch/bleve/v2/mapping/mapping.go | 2 +-
 .../github.com/blevesearch/bleve/v2/query.go | 9 +
 .../blevesearch/bleve/v2/registry/analyzer.go | 10 +-
 .../blevesearch/bleve/v2/registry/registry.go | 4 +-
 .../github.com/blevesearch/bleve/v2/search.go | 24 +-
 .../bleve/v2/search/collector/topn.go | 18 +-
 .../v2/search/facet/facet_builder_datetime.go | 32 +-
 .../v2/search/facet/facet_builder_numeric.go | 32 +-
 .../v2/search/facet/facet_builder_terms.go | 22 +-
 .../bleve/v2/search/facets_builder.go | 116 +-
 .../highlight/fragmenter/simple/simple.go | 12 +-
 .../bleve/v2/search/query/bool_field.go | 6 +-
 .../bleve/v2/search/query/boolean.go | 13 +-
 .../bleve/v2/search/query/conjunction.go | 7 +-
 .../bleve/v2/search/query/date_range.go | 5 +-
 .../bleve/v2/search/query/disjunction.go | 7 +-
 .../bleve/v2/search/query/docid.go | 6 +-
 .../bleve/v2/search/query/fuzzy.go | 6 +-
 .../bleve/v2/search/query/geo_boundingbox.go | 11 +-
 .../v2/search/query/geo_boundingpolygon.go | 5 +-
 .../bleve/v2/search/query/geo_distance.go | 5 +-
 .../bleve/v2/search/query/geo_shape.go | 135 +
 .../bleve/v2/search/query/ip_range.go | 85 +
 .../bleve/v2/search/query/match.go | 9 +-
 .../bleve/v2/search/query/match_all.go | 5 +-
 .../bleve/v2/search/query/match_none.go | 3 +-
 .../bleve/v2/search/query/match_phrase.go | 7 +-
 .../bleve/v2/search/query/multi_phrase.go | 5 +-
 .../bleve/v2/search/query/numeric_range.go | 5 +-
 .../bleve/v2/search/query/phrase.go | 5 +-
 .../bleve/v2/search/query/prefix.go | 6 +-
 .../bleve/v2/search/query/query.go | 24 +-
 .../bleve/v2/search/query/query_string.go | 6 +-
 .../bleve/v2/search/query/query_string_lex.go | 14 +-
 .../bleve/v2/search/query/regexp.go | 5 +-
 .../blevesearch/bleve/v2/search/query/term.go | 6 +-
 .../bleve/v2/search/query/term_range.go | 5 +-
 .../bleve/v2/search/query/wildcard.go | 5 +-
 .../bleve/v2/search/scorer/scorer_term.go | 1 -
 .../blevesearch/bleve/v2/search/search.go | 6 +-
 .../v2/search/searcher/search_boolean.go | 3 +-
 .../v2/search/searcher/search_conjunction.go | 8 +-
 .../v2/search/searcher/search_disjunction.go | 16 +-
 .../searcher/search_disjunction_heap.go | 5 +-
 .../searcher/search_disjunction_slice.go | 5 +-
 .../bleve/v2/search/searcher/search_docid.go | 3 +-
 .../bleve/v2/search/searcher/search_filter.go | 3 +-
 .../bleve/v2/search/searcher/search_fuzzy.go | 54 +-
 .../search/searcher/search_geoboundingbox.go | 44 +-
 .../searcher/search_geopointdistance.go | 56 +-
 .../v2/search/searcher/search_geopolygon.go | 72 +-
 .../v2/search/searcher/search_geoshape.go | 124 +
 .../v2/search/searcher/search_ip_range.go | 68 +
 .../v2/search/searcher/search_match_all.go | 4 +-
 .../v2/search/searcher/search_multi_term.go | 42 +-
 .../search/searcher/search_numeric_range.go | 24 +-
 .../bleve/v2/search/searcher/search_phrase.go | 15 +-
 .../bleve/v2/search/searcher/search_regexp.go | 43 +-
 .../bleve/v2/search/searcher/search_term.go | 9 +-
 .../v2/search/searcher/search_term_prefix.go | 10 +-
 .../v2/search/searcher/search_term_range.go | 10 +-
 .../blevesearch/bleve/v2/search/sort.go | 3 +-
 .../blevesearch/bleve_index_api/directory.go | 23 +
 .../blevesearch/bleve_index_api/document.go | 15 +
 .../blevesearch/bleve_index_api/go.mod | 2 +-
 .../blevesearch/bleve_index_api/index.go | 17 +-
 .../bleve_index_api/spatial_plugin.go | 47 +
 vendor/github.com/blevesearch/geo/LICENSE | 202 +
 .../geo/geojson/geojson_s2_util.go | 319 ++
 .../geo/geojson/geojson_shapes_impl.go | 1861 ++++++++
 .../geo/geojson/geojson_shapes_util.go | 586 +++
 .../blevesearch/geo/s2/bits_go18.go | 54 +
 .../blevesearch/geo/s2/bits_go19.go | 40 +
 vendor/github.com/blevesearch/geo/s2/cap.go | 519 +++
 vendor/github.com/blevesearch/geo/s2/cell.go | 698 +++
 .../blevesearch/geo/s2/cell_index.go | 584 +++
 .../github.com/blevesearch/geo/s2/cellid.go | 944 ++++
 .../blevesearch/geo/s2/cellunion.go | 590 +++
 .../blevesearch/geo/s2/centroids.go | 133 +
 .../geo/s2/contains_point_query.go | 190 +
 .../geo/s2/contains_vertex_query.go | 63 +
 .../blevesearch/geo/s2/convex_hull_query.go | 258 ++
 .../blevesearch/geo/s2/crossing_edge_query.go | 409 ++
 .../blevesearch/geo/s2/distance_target.go | 149 +
 vendor/github.com/blevesearch/geo/s2/doc.go | 29 +
 .../blevesearch/geo/s2/edge_clipping.go | 672 +++
 .../blevesearch/geo/s2/edge_crosser.go | 227 +
 .../blevesearch/geo/s2/edge_crossings.go | 396 ++
 .../blevesearch/geo/s2/edge_distances.go | 408 ++
 .../blevesearch/geo/s2/edge_query.go | 816 ++++
 .../blevesearch/geo/s2/edge_tessellator.go | 291 ++
 .../github.com/blevesearch/geo/s2/encode.go | 224 +
 .../blevesearch/geo/s2/interleave.go | 143 +
 .../github.com/blevesearch/geo/s2/latlng.go | 101 +
 .../github.com/blevesearch/geo/s2/lexicon.go | 175 +
 vendor/github.com/blevesearch/geo/s2/loop.go | 1833 ++++++++
 .../blevesearch/geo/s2/matrix3x3.go | 127 +
 .../geo/s2/max_distance_targets.go | 306 ++
 .../github.com/blevesearch/geo/s2/metric.go | 164 +
 .../geo/s2/min_distance_targets.go | 362 ++
 .../blevesearch/geo/s2/nthderivative.go | 88 +
 .../blevesearch/geo/s2/paddedcell.go | 252 +
 vendor/github.com/blevesearch/geo/s2/point.go | 258 ++
 .../blevesearch/geo/s2/point_measures.go | 149 +
 .../blevesearch/geo/s2/point_vector.go | 42 +
 .../blevesearch/geo/s2/pointcompression.go | 319 ++
 .../github.com/blevesearch/geo/s2/polygon.go | 1229 +++++
 .../github.com/blevesearch/geo/s2/polyline.go | 589 +++
 .../blevesearch/geo/s2/polyline_measures.go | 53 +
 .../blevesearch/geo/s2/predicates.go | 701 +++
 .../blevesearch/geo/s2/projections.go | 241 +
 .../blevesearch/geo/s2/query_entry.go | 93 +
 .../blevesearch/geo/s2/query_options.go | 196 +
 vendor/github.com/blevesearch/geo/s2/rect.go | 726 +++
 .../blevesearch/geo/s2/rect_bounder.go | 352 ++
 .../github.com/blevesearch/geo/s2/region.go | 71 +
 .../blevesearch/geo/s2/region_term_indexer.go | 441 ++
 .../blevesearch/geo/s2/regioncoverer.go | 615 +++
 .../blevesearch/geo/s2/regionunion.go | 66 +
 vendor/github.com/blevesearch/geo/s2/shape.go | 263 ++
 .../blevesearch/geo/s2/shapeindex.go | 1526 ++++++
 .../blevesearch/geo/s2/shapeutil.go | 228 +
 .../geo/s2/shapeutil_edge_iterator.go | 72 +
 vendor/github.com/blevesearch/geo/s2/stuv.go | 427 ++
 vendor/github.com/blevesearch/geo/s2/util.go | 125 +
 .../blevesearch/geo/s2/wedge_relations.go | 97 +
 .../github.com/blevesearch/goleveldb/LICENSE | 24 +
 .../blevesearch/goleveldb/leveldb/batch.go | 349 ++
 .../goleveldb/leveldb/cache/cache.go | 704 +++
 .../goleveldb/leveldb/cache/lru.go | 195 +
 .../blevesearch/goleveldb/leveldb/comparer.go | 67 +
 .../leveldb/comparer/bytes_comparer.go | 51 +
 .../goleveldb/leveldb/comparer/comparer.go | 57 +
 .../blevesearch/goleveldb/leveldb/db.go | 1179 +++++
 .../goleveldb/leveldb/db_compaction.go | 854 ++++
 .../blevesearch/goleveldb/leveldb/db_iter.go | 360 ++
 .../goleveldb/leveldb/db_snapshot.go | 187 +
 .../blevesearch/goleveldb/leveldb/db_state.go | 239 +
 .../goleveldb/leveldb/db_transaction.go | 329 ++
 .../blevesearch/goleveldb/leveldb/db_util.go | 102 +
 .../blevesearch/goleveldb/leveldb/db_write.go | 464 ++
 .../blevesearch/goleveldb/leveldb/doc.go | 92 +
 .../blevesearch/goleveldb/leveldb/errors.go | 20 +
 .../goleveldb/leveldb/errors/errors.go | 78 +
 .../blevesearch/goleveldb/leveldb/filter.go | 31 +
 .../goleveldb/leveldb/filter/bloom.go | 116 +
 .../goleveldb/leveldb/filter/filter.go | 60 +
 .../goleveldb/leveldb/iterator/array_iter.go | 184 +
 .../leveldb/iterator/indexed_iter.go | 242 +
 .../goleveldb/leveldb/iterator/iter.go | 132 +
 .../goleveldb/leveldb/iterator/merged_iter.go | 304 ++
 .../goleveldb/leveldb/journal/journal.go | 524 +++
 .../blevesearch/goleveldb/leveldb/key.go | 143 +
 .../goleveldb/leveldb/memdb/memdb.go | 479 ++
 .../goleveldb/leveldb/opt/options.go | 697 +++
 .../blevesearch/goleveldb/leveldb/options.go | 107 +
 .../blevesearch/goleveldb/leveldb/session.go | 210 +
 .../goleveldb/leveldb/session_compaction.go | 302 ++
 .../goleveldb/leveldb/session_record.go | 323 ++
 .../goleveldb/leveldb/session_util.go | 271 ++
 .../blevesearch/goleveldb/leveldb/storage.go | 63 +
 .../goleveldb/leveldb/storage/file_storage.go | 671 +++
 .../leveldb/storage/file_storage_nacl.go | 34 +
 .../leveldb/storage/file_storage_plan9.go | 63 +
 .../leveldb/storage/file_storage_solaris.go | 81 +
 .../leveldb/storage/file_storage_unix.go | 98 +
 .../leveldb/storage/file_storage_windows.go | 78 +
 .../goleveldb/leveldb/storage/mem_storage.go | 222 +
 .../goleveldb/leveldb/storage/storage.go | 187 +
 .../blevesearch/goleveldb/leveldb/table.go | 531 +++
 .../goleveldb/leveldb/table/reader.go | 1139 +++++
 .../goleveldb/leveldb/table/table.go | 177 +
 .../goleveldb/leveldb/table/writer.go | 375 ++
 .../blevesearch/goleveldb/leveldb/util.go | 98 +
 .../goleveldb/leveldb/util/buffer.go | 293 ++
 .../goleveldb/leveldb/util/buffer_pool.go | 239 +
 .../goleveldb/leveldb/util/crc32.go | 30 +
 .../goleveldb/leveldb/util/hash.go | 48 +
 .../goleveldb/leveldb/util/range.go | 32 +
 .../goleveldb/leveldb/util/util.go | 73 +
 .../blevesearch/goleveldb/leveldb/version.go | 528 +++
 .../gtreap/.gitignore | 0
 .../{steveyen => blevesearch}/gtreap/LICENSE | 0
 .../gtreap/README.md | 0
 vendor/github.com/blevesearch/gtreap/go.mod | 3 +
 .../{steveyen => blevesearch}/gtreap/treap.go | 35 +-
 vendor/github.com/blevesearch/mmap-go/go.mod | 4 +-
 vendor/github.com/blevesearch/mmap-go/go.sum | 4 +-
 .../blevesearch/scorch_segment_api/v2/go.mod | 10 +-
 .../blevesearch/scorch_segment_api/v2/go.sum | 28 +-
 .../scorch_segment_api/v2/segment.go | 22 +
 vendor/github.com/blevesearch/vellum/fst.go | 2 +-
 vendor/github.com/blevesearch/vellum/go.mod | 15 +-
 vendor/github.com/blevesearch/vellum/go.sum | 50 +-
 .../blevesearch/zapx/v11/.golangci.yml | 1 -
 .../github.com/blevesearch/zapx/v11/build.go | 56 +-
 .../blevesearch/zapx/v11/docvalues.go | 12 +
 vendor/github.com/blevesearch/zapx/v11/go.mod | 22 +-
 vendor/github.com/blevesearch/zapx/v11/go.sum | 80 +-
 .../blevesearch/zapx/v11/memuvarint.go | 11 +-
 .../blevesearch/zapx/v11/posting.go | 45 +-
 .../blevesearch/zapx/v11/segment.go | 30 +
 .../blevesearch/zapx/v12/.golangci.yml | 1 -
 .../github.com/blevesearch/zapx/v12/build.go | 56 +-
 .../blevesearch/zapx/v12/docvalues.go | 12 +
 vendor/github.com/blevesearch/zapx/v12/go.mod | 22 +-
 vendor/github.com/blevesearch/zapx/v12/go.sum | 80 +-
 .../blevesearch/zapx/v12/memuvarint.go | 11 +-
 .../blevesearch/zapx/v12/posting.go | 45 +-
 .../blevesearch/zapx/v12/segment.go | 30 +
 .../blevesearch/zapx/v13/.golangci.yml | 1 -
 .../github.com/blevesearch/zapx/v13/build.go | 56 +-
 .../blevesearch/zapx/v13/docvalues.go | 12 +
 vendor/github.com/blevesearch/zapx/v13/go.mod | 22 +-
 vendor/github.com/blevesearch/zapx/v13/go.sum | 80 +-
 .../blevesearch/zapx/v13/memuvarint.go | 11 +-
 .../blevesearch/zapx/v13/posting.go | 45 +-
 .../blevesearch/zapx/v13/segment.go | 30 +
 .../blevesearch/zapx/v14/.golangci.yml | 1 -
 .../github.com/blevesearch/zapx/v14/build.go | 56 +-
 .../blevesearch/zapx/v14/docvalues.go | 12 +
 vendor/github.com/blevesearch/zapx/v14/go.mod | 22 +-
 vendor/github.com/blevesearch/zapx/v14/go.sum | 80 +-
 .../blevesearch/zapx/v14/memuvarint.go | 11 +-
 .../blevesearch/zapx/v14/posting.go | 45 +-
 .../blevesearch/zapx/v14/segment.go | 30 +
 .../blevesearch/zapx/v15/.golangci.yml | 1 -
 .../github.com/blevesearch/zapx/v15/build.go | 56 +-
 .../blevesearch/zapx/v15/contentcoder.go | 29 +-
 .../github.com/blevesearch/zapx/v15/dict.go | 18 +
 .../blevesearch/zapx/v15/docvalues.go | 49 +-
 vendor/github.com/blevesearch/zapx/v15/go.mod | 22 +-
 vendor/github.com/blevesearch/zapx/v15/go.sum | 80 +-
 .../blevesearch/zapx/v15/intDecoder.go | 15 +
 .../blevesearch/zapx/v15/intcoder.go | 14 +
 .../blevesearch/zapx/v15/memuvarint.go | 11 +-
 vendor/github.com/blevesearch/zapx/v15/new.go | 34 +-
 .../blevesearch/zapx/v15/posting.go | 71 +-
 .../blevesearch/zapx/v15/segment.go | 55 +
 .../glycerine/go-unsnap-stream/.gitignore | 22 -
 .../glycerine/go-unsnap-stream/LICENSE | 21 -
 .../glycerine/go-unsnap-stream/README.md | 22 -
 .../glycerine/go-unsnap-stream/binary.dat | Bin 5592 -> 0 bytes
 .../go-unsnap-stream/binary.dat.snappy | Bin 5610 -> 0 bytes
 .../glycerine/go-unsnap-stream/rbuf.go | 375 --
 .../glycerine/go-unsnap-stream/snap.go | 100 -
 .../glycerine/go-unsnap-stream/unenc.txt | 1 -
 .../go-unsnap-stream/unenc.txt.snappy | Bin 31 -> 0 bytes
 .../glycerine/go-unsnap-stream/unsnap.go | 519 ---
 vendor/github.com/golang/geo/LICENSE | 202 +
 vendor/github.com/golang/geo/r1/doc.go | 20 +
 vendor/github.com/golang/geo/r1/interval.go | 177 +
 vendor/github.com/golang/geo/r2/doc.go | 20 +
 vendor/github.com/golang/geo/r2/rect.go | 255 +
 vendor/github.com/golang/geo/r3/doc.go | 20 +
 .../github.com/golang/geo/r3/precisevector.go | 198 +
 vendor/github.com/golang/geo/r3/vector.go | 183 +
 vendor/github.com/golang/geo/s1/angle.go | 120 +
 vendor/github.com/golang/geo/s1/chordangle.go | 320 ++
 vendor/github.com/golang/geo/s1/doc.go | 20 +
 vendor/github.com/golang/geo/s1/interval.go | 462 ++
 .../github.com/json-iterator/go/.codecov.yml | 3 +
 vendor/github.com/json-iterator/go/.gitignore | 4 +
 .../github.com/json-iterator/go/.travis.yml | 14 +
 vendor/github.com/json-iterator/go/LICENSE | 21 +
 vendor/github.com/json-iterator/go/README.md | 86 +
 .../json-iterator/go/feature_adapter.go | 133 +
 .../json-iterator/go/feature_any.go | 245 +
 .../json-iterator/go/feature_any_array.go | 278 ++
 .../json-iterator/go/feature_any_bool.go | 137 +
 .../json-iterator/go/feature_any_float.go | 83 +
 .../json-iterator/go/feature_any_int32.go | 74 +
 .../json-iterator/go/feature_any_int64.go | 74 +
 .../json-iterator/go/feature_any_invalid.go | 82 +
 .../json-iterator/go/feature_any_nil.go | 69 +
 .../json-iterator/go/feature_any_number.go | 104 +
 .../json-iterator/go/feature_any_object.go | 374 ++
 .../json-iterator/go/feature_any_string.go | 166 +
 .../json-iterator/go/feature_any_uint32.go | 74 +
 .../json-iterator/go/feature_any_uint64.go | 74 +
 .../json-iterator/go/feature_config.go | 347 ++
 .../json-iterator/go/feature_iter.go | 322 ++
 .../json-iterator/go/feature_iter_array.go | 58 +
 .../json-iterator/go/feature_iter_float.go | 341 ++
 .../json-iterator/go/feature_iter_int.go | 268 ++
 .../json-iterator/go/feature_iter_object.go | 267 ++
 .../json-iterator/go/feature_iter_skip.go | 129 +
 .../go/feature_iter_skip_sloppy.go | 144 +
 .../go/feature_iter_skip_strict.go | 89 +
 .../json-iterator/go/feature_iter_string.go | 215 +
 .../json-iterator/go/feature_json_number.go | 31 +
 .../json-iterator/go/feature_pool.go | 59 +
 .../json-iterator/go/feature_reflect.go | 721 +++
 .../json-iterator/go/feature_reflect_array.go | 99 +
 .../go/feature_reflect_extension.go | 414 ++
 .../json-iterator/go/feature_reflect_map.go | 244 +
 .../go/feature_reflect_native.go | 764 +++
 .../go/feature_reflect_object.go | 196 +
 .../json-iterator/go/feature_reflect_slice.go | 147 +
 .../go/feature_reflect_struct_decoder.go | 934 ++++
 .../json-iterator/go/feature_stream.go | 308 ++
 .../json-iterator/go/feature_stream_float.go | 96 +
 .../json-iterator/go/feature_stream_int.go | 320 ++
 .../json-iterator/go/feature_stream_string.go | 396 ++
 .../go/fuzzy_mode_convert_table.md | 7 +
 .../github.com/json-iterator/go/jsoniter.go | 18 +
 vendor/github.com/json-iterator/go/test.sh | 12 +
 vendor/github.com/philhofer/fwd/LICENSE.md | 7 -
 vendor/github.com/philhofer/fwd/README.md | 315 --
 vendor/github.com/philhofer/fwd/reader.go | 383 --
 vendor/github.com/philhofer/fwd/writer.go | 236 -
 .../philhofer/fwd/writer_appengine.go | 5 -
 .../github.com/philhofer/fwd/writer_unsafe.go | 18 -
 vendor/github.com/steveyen/gtreap/go.mod | 3 -
 vendor/github.com/tinylib/msgp/LICENSE | 8 -
 .../tinylib/msgp/msgp/advise_linux.go | 24 -
 .../tinylib/msgp/msgp/advise_other.go | 17 -
 .../github.com/tinylib/msgp/msgp/circular.go | 39 -
 vendor/github.com/tinylib/msgp/msgp/defs.go | 142 -
 vendor/github.com/tinylib/msgp/msgp/edit.go | 242 -
 vendor/github.com/tinylib/msgp/msgp/elsize.go | 99 -
 vendor/github.com/tinylib/msgp/msgp/errors.go | 317 --
 .../github.com/tinylib/msgp/msgp/extension.go | 549 ---
 vendor/github.com/tinylib/msgp/msgp/file.go | 92 -
 .../github.com/tinylib/msgp/msgp/file_port.go | 47 -
 .../github.com/tinylib/msgp/msgp/integers.go | 174 -
 vendor/github.com/tinylib/msgp/msgp/json.go | 568 ---
 .../tinylib/msgp/msgp/json_bytes.go | 363 --
 vendor/github.com/tinylib/msgp/msgp/number.go | 267 --
 vendor/github.com/tinylib/msgp/msgp/purego.go | 15 -
 vendor/github.com/tinylib/msgp/msgp/read.go | 1363 ------
 .../tinylib/msgp/msgp/read_bytes.go | 1197 -----
 vendor/github.com/tinylib/msgp/msgp/size.go | 38 -
 vendor/github.com/tinylib/msgp/msgp/unsafe.go | 36 -
 vendor/github.com/tinylib/msgp/msgp/write.go | 861 ----
 .../tinylib/msgp/msgp/write_bytes.go | 411 --
 vendor/github.com/willf/bitset/go.mod | 3 -
 vendor/golang.org/x/sys/unix/README.md | 8 +-
 vendor/golang.org/x/sys/unix/aliases.go | 3 +-
 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 +
 .../unix/{asm_darwin_386.s => asm_bsd_386.s} | 10 +-
 .../{asm_openbsd_amd64.s => asm_bsd_amd64.s} | 8 +-
 .../unix/{asm_netbsd_arm.s => asm_bsd_arm.s} | 8 +-
 .../{asm_netbsd_amd64.s => asm_bsd_arm64.s} | 8 +-
 .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 -
 vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 -
 .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 -
 .../x/sys/unix/asm_dragonfly_amd64.s | 29 -
 .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 -
 .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 -
 .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 -
 .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 -
 vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 +
 .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 +
 vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 +
 .../golang.org/x/sys/unix/asm_linux_arm64.s | 1 +
 .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 +
 .../golang.org/x/sys/unix/asm_linux_mips64x.s | 1 +
 .../golang.org/x/sys/unix/asm_linux_mipsx.s | 1 +
 .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 1 +
 .../golang.org/x/sys/unix/asm_linux_riscv64.s | 4 +-
 .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 +-
 vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 -
 .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 -
 .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 -
 .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 -
 .../golang.org/x/sys/unix/asm_openbsd_arm64.s | 29 -
 .../x/sys/unix/asm_openbsd_mips64.s | 1 +
 .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 +
 vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 426 ++
 vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 +
 vendor/golang.org/x/sys/unix/constants.go | 3 +-
 vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 4 +-
 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 4 +-
 vendor/golang.org/x/sys/unix/dev_zos.go | 29 +
 vendor/golang.org/x/sys/unix/dirent.go | 1 +
 vendor/golang.org/x/sys/unix/endian_big.go | 1 +
 vendor/golang.org/x/sys/unix/endian_little.go | 3 +-
 vendor/golang.org/x/sys/unix/env_unix.go | 3 +-
 vendor/golang.org/x/sys/unix/epoll_zos.go | 221 +
 vendor/golang.org/x/sys/unix/fcntl.go | 1 +
 .../x/sys/unix/fcntl_linux_32bit.go | 3 +-
 vendor/golang.org/x/sys/unix/fdset.go | 3 +-
 vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 +
 vendor/golang.org/x/sys/unix/gccgo.go | 4 +-
 .../x/sys/unix/gccgo_linux_amd64.go | 1 +
 vendor/golang.org/x/sys/unix/ifreq_linux.go | 142 +
 vendor/golang.org/x/sys/unix/ioctl.go | 1 +
 vendor/golang.org/x/sys/unix/ioctl_linux.go | 219 +
 vendor/golang.org/x/sys/unix/ioctl_zos.go | 74 +
 vendor/golang.org/x/sys/unix/mkall.sh | 16 +-
 vendor/golang.org/x/sys/unix/mkerrors.sh | 58 +-
 vendor/golang.org/x/sys/unix/pagesize_unix.go | 1 +
 vendor/golang.org/x/sys/unix/ptrace_darwin.go | 1 +
 vendor/golang.org/x/sys/unix/ptrace_ios.go | 1 +
 vendor/golang.org/x/sys/unix/race.go | 1 +
 vendor/golang.org/x/sys/unix/race0.go | 3 +-
 .../x/sys/unix/readdirent_getdents.go | 1 +
 .../x/sys/unix/readdirent_getdirentries.go | 1 +
 .../golang.org/x/sys/unix/sockcmsg_linux.go | 49 +
 vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 3 +-
 .../x/sys/unix/sockcmsg_unix_other.go | 7 +-
 vendor/golang.org/x/sys/unix/str.go | 1 +
 vendor/golang.org/x/sys/unix/syscall.go | 3 +-
 vendor/golang.org/x/sys/unix/syscall_aix.go | 63 +-
 .../golang.org/x/sys/unix/syscall_aix_ppc.go | 4 +-
 .../x/sys/unix/syscall_aix_ppc64.go | 4 +-
 vendor/golang.org/x/sys/unix/syscall_bsd.go | 68 +-
 .../x/sys/unix/syscall_darwin.1_12.go | 1 +
 .../x/sys/unix/syscall_darwin.1_13.go | 5 +-
 .../golang.org/x/sys/unix/syscall_darwin.go | 171 +-
 .../x/sys/unix/syscall_darwin_386.go | 50 -
 .../x/sys/unix/syscall_darwin_amd64.go | 1 +
 .../x/sys/unix/syscall_darwin_arm.go | 51 -
 .../x/sys/unix/syscall_darwin_arm64.go | 1 +
 .../x/sys/unix/syscall_darwin_libSystem.go | 10 +-
 .../x/sys/unix/syscall_dragonfly.go | 37 +-
 .../x/sys/unix/syscall_dragonfly_amd64.go | 1 +
 .../golang.org/x/sys/unix/syscall_freebsd.go | 32 +-
 .../x/sys/unix/syscall_freebsd_386.go | 1 +
 .../x/sys/unix/syscall_freebsd_amd64.go | 1 +
 .../x/sys/unix/syscall_freebsd_arm.go | 1 +
 .../x/sys/unix/syscall_freebsd_arm64.go | 1 +
 .../golang.org/x/sys/unix/syscall_illumos.go | 113 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go | 541 ++-
 .../x/sys/unix/syscall_linux_386.go | 59 +-
 .../x/sys/unix/syscall_linux_alarm.go | 14 +
 .../x/sys/unix/syscall_linux_amd64.go | 55 +-
 .../x/sys/unix/syscall_linux_amd64_gc.go | 4 +-
 .../x/sys/unix/syscall_linux_arm.go | 56 +-
 .../x/sys/unix/syscall_linux_arm64.go | 62 +-
 .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 +
 .../x/sys/unix/syscall_linux_gc_386.go | 1 +
 .../x/sys/unix/syscall_linux_gc_arm.go | 1 +
 .../x/sys/unix/syscall_linux_gccgo_386.go | 1 +
 .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 +
 .../x/sys/unix/syscall_linux_loong64.go | 191 +
 .../x/sys/unix/syscall_linux_mips64x.go | 45 +-
 .../x/sys/unix/syscall_linux_mipsx.go | 49 +-
 .../x/sys/unix/syscall_linux_ppc.go | 236 +
 .../x/sys/unix/syscall_linux_ppc64x.go | 44 +-
 .../x/sys/unix/syscall_linux_riscv64.go | 57 +-
 .../x/sys/unix/syscall_linux_s390x.go | 56 +-
 .../x/sys/unix/syscall_linux_sparc64.go | 43 +-
 .../golang.org/x/sys/unix/syscall_netbsd.go | 30 +-
 .../x/sys/unix/syscall_netbsd_386.go | 1 +
 .../x/sys/unix/syscall_netbsd_amd64.go | 1 +
 .../x/sys/unix/syscall_netbsd_arm.go | 1 +
 .../x/sys/unix/syscall_netbsd_arm64.go | 1 +
 .../golang.org/x/sys/unix/syscall_openbsd.go | 21 +-
 .../x/sys/unix/syscall_openbsd_386.go | 1 +
 .../x/sys/unix/syscall_openbsd_amd64.go | 1 +
 .../x/sys/unix/syscall_openbsd_arm.go | 1 +
 .../x/sys/unix/syscall_openbsd_arm64.go | 1 +
 .../golang.org/x/sys/unix/syscall_solaris.go | 346 +-
 .../x/sys/unix/syscall_solaris_amd64.go | 1 +
 vendor/golang.org/x/sys/unix/syscall_unix.go | 56 +
 .../golang.org/x/sys/unix/syscall_unix_gc.go | 5 +-
 .../x/sys/unix/syscall_unix_gc_ppc64x.go | 1 +
 .../x/sys/unix/syscall_zos_s390x.go | 1823 ++++++++
 vendor/golang.org/x/sys/unix/sysvshm_linux.go | 21 +
 vendor/golang.org/x/sys/unix/sysvshm_unix.go | 61 +
 .../x/sys/unix/sysvshm_unix_other.go | 14 +
 vendor/golang.org/x/sys/unix/timestruct.go | 3 +-
 vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 +
 .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 +
 .../x/sys/unix/zerrors_aix_ppc64.go | 1 +
 .../x/sys/unix/zerrors_darwin_386.go | 1788 -------
 .../x/sys/unix/zerrors_darwin_amd64.go | 3050 ++++++------
 .../x/sys/unix/zerrors_darwin_arm.go | 1788 -------
 .../x/sys/unix/zerrors_darwin_arm64.go | 3050 ++++++------
 .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 +
 .../x/sys/unix/zerrors_freebsd_386.go | 12 +
 .../x/sys/unix/zerrors_freebsd_amd64.go | 12 +
 .../x/sys/unix/zerrors_freebsd_arm.go | 21 +
 .../x/sys/unix/zerrors_freebsd_arm64.go | 12 +
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 324 +-
 .../x/sys/unix/zerrors_linux_386.go | 38 +-
 .../x/sys/unix/zerrors_linux_amd64.go | 38 +-
 .../x/sys/unix/zerrors_linux_arm.go | 38 +-
 .../x/sys/unix/zerrors_linux_arm64.go | 39 +-
 .../x/sys/unix/zerrors_linux_loong64.go | 818 ++++
 .../x/sys/unix/zerrors_linux_mips.go | 38 +-
 .../x/sys/unix/zerrors_linux_mips64.go | 38 +-
 .../x/sys/unix/zerrors_linux_mips64le.go | 38 +-
 .../x/sys/unix/zerrors_linux_mipsle.go | 38 +-
 .../x/sys/unix/zerrors_linux_ppc.go | 885 ++++
 .../x/sys/unix/zerrors_linux_ppc64.go | 38 +-
 .../x/sys/unix/zerrors_linux_ppc64le.go | 38 +-
 .../x/sys/unix/zerrors_linux_riscv64.go | 38 +-
 .../x/sys/unix/zerrors_linux_s390x.go | 40 +-
 .../x/sys/unix/zerrors_linux_sparc64.go | 38 +-
 .../x/sys/unix/zerrors_netbsd_386.go | 1 +
 .../x/sys/unix/zerrors_netbsd_amd64.go | 1 +
 .../x/sys/unix/zerrors_netbsd_arm.go | 1 +
 .../x/sys/unix/zerrors_netbsd_arm64.go | 1 +
 .../x/sys/unix/zerrors_openbsd_386.go | 4 +
 .../x/sys/unix/zerrors_openbsd_amd64.go | 1 +
 .../x/sys/unix/zerrors_openbsd_arm.go | 4 +
 .../x/sys/unix/zerrors_openbsd_arm64.go | 1 +
 .../x/sys/unix/zerrors_openbsd_mips64.go | 1 +
 .../x/sys/unix/zerrors_solaris_amd64.go | 4 +
 .../x/sys/unix/zerrors_zos_s390x.go | 860 ++++
 .../x/sys/unix/zptrace_armnn_linux.go | 1 +
 .../x/sys/unix/zptrace_mipsnn_linux.go | 1 +
 .../x/sys/unix/zptrace_mipsnnle_linux.go | 1 +
 .../x/sys/unix/zptrace_x86_linux.go | 1 +
 .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 27 +-
 .../x/sys/unix/zsyscall_aix_ppc64.go | 25 +-
 .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 24 +-
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 22 +-
 .../x/sys/unix/zsyscall_darwin_386.1_13.go | 39 -
 .../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 -
 .../x/sys/unix/zsyscall_darwin_386.go | 2430 ----------
 .../x/sys/unix/zsyscall_darwin_386.s | 290 --
 .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 9 +-
 .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 19 +-
 .../x/sys/unix/zsyscall_darwin_amd64.go | 669 +--
 .../x/sys/unix/zsyscall_darwin_amd64.s | 885 +++-
 .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 39 -
 .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 -
 .../x/sys/unix/zsyscall_darwin_arm.go | 2416 ----------
 .../x/sys/unix/zsyscall_darwin_arm.s | 288 --
 .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 9 +-
 .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 19 +-
 .../x/sys/unix/zsyscall_darwin_arm64.go | 669 +--
 .../x/sys/unix/zsyscall_darwin_arm64.s | 885 +++-
 .../x/sys/unix/zsyscall_dragonfly_amd64.go | 7 +-
 .../x/sys/unix/zsyscall_freebsd_386.go | 5 +-
 .../x/sys/unix/zsyscall_freebsd_amd64.go | 5 +-
 .../x/sys/unix/zsyscall_freebsd_arm.go | 5 +-
 .../x/sys/unix/zsyscall_freebsd_arm64.go | 5 +-
 .../x/sys/unix/zsyscall_illumos_amd64.go | 29 +-
 .../golang.org/x/sys/unix/zsyscall_linux.go | 228 +-
 .../x/sys/unix/zsyscall_linux_386.go | 55 +-
 .../x/sys/unix/zsyscall_linux_amd64.go | 89 +-
 .../x/sys/unix/zsyscall_linux_arm.go | 69 +-
 .../x/sys/unix/zsyscall_linux_arm64.go | 27 +-
 .../x/sys/unix/zsyscall_linux_loong64.go | 552 +++
 .../x/sys/unix/zsyscall_linux_mips.go | 68 +-
 .../x/sys/unix/zsyscall_linux_mips64.go | 45 +-
 .../x/sys/unix/zsyscall_linux_mips64le.go | 48 +-
 .../x/sys/unix/zsyscall_linux_mipsle.go | 68 +-
 .../x/sys/unix/zsyscall_linux_ppc.go | 709 +++
 .../x/sys/unix/zsyscall_linux_ppc64.go | 82 +-
 .../x/sys/unix/zsyscall_linux_ppc64le.go | 82 +-
 .../x/sys/unix/zsyscall_linux_riscv64.go | 16 +-
 .../x/sys/unix/zsyscall_linux_s390x.go | 59 +-
 .../x/sys/unix/zsyscall_linux_sparc64.go | 55 +-
 .../x/sys/unix/zsyscall_netbsd_386.go | 11 +-
 .../x/sys/unix/zsyscall_netbsd_amd64.go | 11 +-
 .../x/sys/unix/zsyscall_netbsd_arm.go | 11 +-
 .../x/sys/unix/zsyscall_netbsd_arm64.go | 11 +-
 .../x/sys/unix/zsyscall_openbsd_386.go | 5 +-
 .../x/sys/unix/zsyscall_openbsd_amd64.go | 5 +-
 .../x/sys/unix/zsyscall_openbsd_arm.go | 5 +-
 .../x/sys/unix/zsyscall_openbsd_arm64.go | 5 +-
 .../x/sys/unix/zsyscall_openbsd_mips64.go | 5 +-
 .../x/sys/unix/zsyscall_solaris_amd64.go | 108 +-
 .../x/sys/unix/zsyscall_zos_s390x.go | 1255 +++++
 .../x/sys/unix/zsysctl_openbsd_386.go | 1 +
 .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 +
 .../x/sys/unix/zsysctl_openbsd_arm.go | 1 +
 .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 +
 .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 +
 .../x/sys/unix/zsysnum_darwin_386.go | 437 --
 .../x/sys/unix/zsysnum_darwin_amd64.go | 1 +
 .../x/sys/unix/zsysnum_darwin_arm.go | 437 --
 .../x/sys/unix/zsysnum_darwin_arm64.go | 1 +
 .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 +
 .../x/sys/unix/zsysnum_freebsd_386.go | 1 +
 .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 +
 .../x/sys/unix/zsysnum_freebsd_arm.go | 1 +
 .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 +
 .../x/sys/unix/zsysnum_linux_386.go | 11 +
 .../x/sys/unix/zsysnum_linux_amd64.go | 715 +--
 .../x/sys/unix/zsysnum_linux_arm.go | 11 +
 .../x/sys/unix/zsysnum_linux_arm64.go | 605 +--
 .../x/sys/unix/zsysnum_linux_loong64.go | 313 ++
 .../x/sys/unix/zsysnum_linux_mips.go | 10 +
 .../x/sys/unix/zsysnum_linux_mips64.go | 700 +--
 .../x/sys/unix/zsysnum_linux_mips64le.go | 700 +--
 .../x/sys/unix/zsysnum_linux_mipsle.go | 10 +
 .../x/sys/unix/zsysnum_linux_ppc.go | 441 ++
 .../x/sys/unix/zsysnum_linux_ppc64.go | 798 ++--
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 798 ++--
 .../x/sys/unix/zsysnum_linux_riscv64.go | 602 +--
 .../x/sys/unix/zsysnum_linux_s390x.go | 728 +--
 .../x/sys/unix/zsysnum_linux_sparc64.go | 756 +--
 .../x/sys/unix/zsysnum_netbsd_386.go | 1 +
 .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 +
 .../x/sys/unix/zsysnum_netbsd_arm.go | 1 +
 .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 +
 .../x/sys/unix/zsysnum_openbsd_386.go | 1 +
 .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 +
 .../x/sys/unix/zsysnum_openbsd_arm.go | 1 +
 .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 +
 .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 +
 .../x/sys/unix/zsysnum_zos_s390x.go | 2670 +++++++++++
 .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 +
 .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 +
 .../x/sys/unix/ztypes_darwin_386.go | 516 ---
 .../x/sys/unix/ztypes_darwin_amd64.go | 247 +
 .../x/sys/unix/ztypes_darwin_arm.go | 516 ---
 .../x/sys/unix/ztypes_darwin_arm64.go | 247 +
 .../x/sys/unix/ztypes_dragonfly_amd64.go | 4 +
 .../x/sys/unix/ztypes_freebsd_386.go | 17 +-
 .../x/sys/unix/ztypes_freebsd_amd64.go | 17 +-
 .../x/sys/unix/ztypes_freebsd_arm.go | 17 +-
 .../x/sys/unix/ztypes_freebsd_arm64.go | 17 +-
 .../x/sys/unix/ztypes_illumos_amd64.go | 42 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 1944 +++++++-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 72 +-
 .../x/sys/unix/ztypes_linux_amd64.go | 70 +-
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 72 +-
 .../x/sys/unix/ztypes_linux_arm64.go | 70 +-
 .../x/sys/unix/ztypes_linux_loong64.go | 679 +++
 .../x/sys/unix/ztypes_linux_mips.go | 71 +-
 .../x/sys/unix/ztypes_linux_mips64.go | 70 +-
 .../x/sys/unix/ztypes_linux_mips64le.go | 70 +-
 .../x/sys/unix/ztypes_linux_mipsle.go | 71 +-
 .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 691 +++
 .../x/sys/unix/ztypes_linux_ppc64.go | 69 +-
 .../x/sys/unix/ztypes_linux_ppc64le.go | 69 +-
 .../x/sys/unix/ztypes_linux_riscv64.go | 70 +-
 .../x/sys/unix/ztypes_linux_s390x.go | 73 +-
 .../x/sys/unix/ztypes_linux_sparc64.go | 69 +-
 .../x/sys/unix/ztypes_netbsd_386.go | 5 +-
 .../x/sys/unix/ztypes_netbsd_amd64.go | 5 +-
 .../x/sys/unix/ztypes_netbsd_arm.go | 5 +-
 .../x/sys/unix/ztypes_netbsd_arm64.go | 5 +-
 .../x/sys/unix/ztypes_openbsd_386.go | 24 +-
 .../x/sys/unix/ztypes_openbsd_amd64.go | 24 +-
 .../x/sys/unix/ztypes_openbsd_arm.go | 24 +-
 .../x/sys/unix/ztypes_openbsd_arm64.go | 24 +-
 .../x/sys/unix/ztypes_openbsd_mips64.go | 24 +-
 .../x/sys/unix/ztypes_solaris_amd64.go | 41 +
 .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 406 ++
 vendor/golang.org/x/sys/windows/aliases.go | 4 +-
 vendor/golang.org/x/sys/windows/empty.s | 1 +
 vendor/golang.org/x/sys/windows/eventlog.go | 1 +
 .../golang.org/x/sys/windows/exec_windows.go | 91 +-
 .../x/sys/windows/memory_windows.go | 11 +
 vendor/golang.org/x/sys/windows/mkerrors.bash | 7 +
 vendor/golang.org/x/sys/windows/mksyscall.go | 3 +-
 vendor/golang.org/x/sys/windows/race.go | 1 +
 vendor/golang.org/x/sys/windows/race0.go | 1 +
 .../x/sys/windows/security_windows.go | 30 +-
 vendor/golang.org/x/sys/windows/service.go | 14 +-
 .../x/sys/windows/setupapi_windows.go | 1425 ++++++
 .../x/sys/windows/setupapierrors_windows.go | 100 -
 vendor/golang.org/x/sys/windows/str.go | 1 +
 vendor/golang.org/x/sys/windows/syscall.go | 1 +
 .../x/sys/windows/syscall_windows.go | 275 +-
 .../golang.org/x/sys/windows/types_windows.go | 1091 ++++-
 .../x/sys/windows/types_windows_arm64.go | 34 +
 .../x/sys/windows/zerrors_windows.go | 2619 ++++++++++-
 .../x/sys/windows/zsyscall_windows.go | 960 +++-
 .../encoding/simplifiedchinese/hzgb2312.go | 2 +-
 .../text/internal/language/compact/tables.go | 36 +-
 .../x/text/internal/language/language.go | 133 +-
 .../x/text/internal/language/parse.go | 44 +-
 .../x/text/internal/language/tables.go | 4089 +++++++++--------
 vendor/golang.org/x/text/language/go1_1.go | 1 +
 vendor/golang.org/x/text/language/go1_2.go | 1 +
 vendor/golang.org/x/text/language/language.go | 4 +
 vendor/golang.org/x/text/language/parse.go | 22 +
 vendor/golang.org/x/text/language/tables.go | 86 +-
 vendor/modules.txt | 71 +-
 763 files changed, 92110 insertions(+), 35723 deletions(-)
 delete mode 100644 vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go
 delete mode 100644 vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go
 delete mode 100644 vendor/github.com/RoaringBitmap/roaring/byte_input.go
 create mode 100644 vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go
 create mode 100644 vendor/github.com/RoaringBitmap/roaring/internal/pools.go
 delete mode 100644 vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go
 delete mode 100644 vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/.gitignore (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/.travis.yml (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/LICENSE (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/README.md (84%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/azure-pipelines.yml (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/bitset.go (97%)
 create mode 100644 vendor/github.com/bits-and-blooms/bitset/go.mod
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/go.sum (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt.go (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt_19.go (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt_amd64.go (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt_amd64.s (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/popcnt_generic.go (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/trailing_zeros_18.go (100%)
 rename vendor/github.com/{willf => bits-and-blooms}/bitset/trailing_zeros_19.go (100%)
 create mode 100644 vendor/github.com/blevesearch/bleve/v2/SECURITY.md
 create mode 100644 vendor/github.com/blevesearch/bleve/v2/document/field_geoshape.go
 create mode 100644 vendor/github.com/blevesearch/bleve/v2/document/field_ip.go
 create mode 100644 vendor/github.com/blevesearch/bleve/v2/geo/geo_s2plugin_impl.go
 create mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/geo_shape.go
 create mode 100644 vendor/github.com/blevesearch/bleve/v2/search/query/ip_range.go
 create mode 100644 vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoshape.go
 create mode 100644 vendor/github.com/blevesearch/bleve/v2/search/searcher/search_ip_range.go
 create mode 100644 vendor/github.com/blevesearch/bleve_index_api/directory.go
 create mode 100644 vendor/github.com/blevesearch/bleve_index_api/spatial_plugin.go
 create mode 100644 vendor/github.com/blevesearch/geo/LICENSE
 create mode 100644 vendor/github.com/blevesearch/geo/geojson/geojson_s2_util.go
 create mode 100644 vendor/github.com/blevesearch/geo/geojson/geojson_shapes_impl.go
 create mode 100644 vendor/github.com/blevesearch/geo/geojson/geojson_shapes_util.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/bits_go18.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/bits_go19.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/cap.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/cell.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/cell_index.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/cellid.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/cellunion.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/centroids.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/contains_point_query.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/contains_vertex_query.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/convex_hull_query.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/crossing_edge_query.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/distance_target.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/doc.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/edge_clipping.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/edge_crosser.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/edge_crossings.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/edge_distances.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/edge_query.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/edge_tessellator.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/encode.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/interleave.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/latlng.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/lexicon.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/loop.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/matrix3x3.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/max_distance_targets.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/metric.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/min_distance_targets.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/nthderivative.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/paddedcell.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/point.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/point_measures.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/point_vector.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/pointcompression.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/polygon.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/polyline.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/polyline_measures.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/predicates.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/projections.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/query_entry.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/query_options.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/rect.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/rect_bounder.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/region.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/region_term_indexer.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/regioncoverer.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/regionunion.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/shape.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/shapeindex.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/shapeutil.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/shapeutil_edge_iterator.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/stuv.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/util.go
 create mode 100644 vendor/github.com/blevesearch/geo/s2/wedge_relations.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/LICENSE
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/batch.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/cache/cache.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/cache/lru.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/comparer.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/comparer/bytes_comparer.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/comparer/comparer.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/db.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/db_compaction.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/db_iter.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/db_snapshot.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/db_state.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/db_transaction.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/db_util.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/db_write.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/doc.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/errors.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/errors/errors.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/filter.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/filter/bloom.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/filter/filter.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/iterator/array_iter.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/iterator/indexed_iter.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/iterator/iter.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/iterator/merged_iter.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/journal/journal.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/key.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/memdb/memdb.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/opt/options.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/options.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/session.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/session_compaction.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/session_record.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/session_util.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/storage.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/storage/file_storage.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/storage/file_storage_nacl.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/storage/file_storage_plan9.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/storage/file_storage_solaris.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/storage/file_storage_unix.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/storage/file_storage_windows.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/storage/mem_storage.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/storage/storage.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/table.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/table/reader.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/table/table.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/table/writer.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/util.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/util/buffer.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/util/buffer_pool.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/util/crc32.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/util/hash.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/util/range.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/util/util.go
 create mode 100644 vendor/github.com/blevesearch/goleveldb/leveldb/version.go
 rename vendor/github.com/{steveyen => blevesearch}/gtreap/.gitignore (100%)
 rename vendor/github.com/{steveyen => blevesearch}/gtreap/LICENSE (100%)
 rename vendor/github.com/{steveyen => blevesearch}/gtreap/README.md (100%)
 create mode 100644 vendor/github.com/blevesearch/gtreap/go.mod
 rename vendor/github.com/{steveyen => blevesearch}/gtreap/treap.go (83%)
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/.gitignore
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/LICENSE
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/README.md
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/binary.dat
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/binary.dat.snappy
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/rbuf.go
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/snap.go
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/unenc.txt
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/unenc.txt.snappy
 delete mode 100644 vendor/github.com/glycerine/go-unsnap-stream/unsnap.go
 create mode 100644 vendor/github.com/golang/geo/LICENSE
 create mode 100644 vendor/github.com/golang/geo/r1/doc.go
 create mode 100644 vendor/github.com/golang/geo/r1/interval.go
 create mode 100644 vendor/github.com/golang/geo/r2/doc.go
 create mode 100644 vendor/github.com/golang/geo/r2/rect.go
 create mode 100644 vendor/github.com/golang/geo/r3/doc.go
 create mode 100644 vendor/github.com/golang/geo/r3/precisevector.go
 create mode 100644 vendor/github.com/golang/geo/r3/vector.go
 create mode 100644 vendor/github.com/golang/geo/s1/angle.go
 create mode 100644 vendor/github.com/golang/geo/s1/chordangle.go
 create mode 100644 vendor/github.com/golang/geo/s1/doc.go
 create mode 100644 vendor/github.com/golang/geo/s1/interval.go
 create mode 100644 vendor/github.com/json-iterator/go/.codecov.yml
 create mode 100644 vendor/github.com/json-iterator/go/.gitignore
 create mode 100644 vendor/github.com/json-iterator/go/.travis.yml
 create mode 100644 vendor/github.com/json-iterator/go/LICENSE
 create mode 100644 vendor/github.com/json-iterator/go/README.md
 create mode 100644 vendor/github.com/json-iterator/go/feature_adapter.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_array.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_bool.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_float.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_int32.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_int64.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_invalid.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_nil.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_number.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_object.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_string.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_uint32.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_any_uint64.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_config.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_iter.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_iter_array.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_iter_float.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_iter_int.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_iter_object.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_iter_skip.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_iter_skip_strict.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_iter_string.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_json_number.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_pool.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_reflect.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_array.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_extension.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_map.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_native.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_object.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_slice.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_stream.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_stream_float.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_stream_int.go
 create mode 100644 vendor/github.com/json-iterator/go/feature_stream_string.go
 create mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
 create mode 100644 vendor/github.com/json-iterator/go/jsoniter.go
 create mode 100644 vendor/github.com/json-iterator/go/test.sh
 delete mode 100644 vendor/github.com/philhofer/fwd/LICENSE.md
 delete mode 100644 vendor/github.com/philhofer/fwd/README.md
 delete mode 100644 vendor/github.com/philhofer/fwd/reader.go
 delete mode 100644 vendor/github.com/philhofer/fwd/writer.go
 delete mode 100644 vendor/github.com/philhofer/fwd/writer_appengine.go
 delete mode 100644 vendor/github.com/philhofer/fwd/writer_unsafe.go
 delete mode 100644 vendor/github.com/steveyen/gtreap/go.mod
 delete mode 100644 vendor/github.com/tinylib/msgp/LICENSE
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_linux.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_other.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/circular.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/defs.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/edit.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/elsize.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/errors.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/extension.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/file.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/file_port.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/integers.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/json.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/json_bytes.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/number.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/purego.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/read.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/read_bytes.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/size.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/unsafe.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/write.go
 delete mode 100644 vendor/github.com/tinylib/msgp/msgp/write_bytes.go
 delete mode 100644 vendor/github.com/willf/bitset/go.mod
 rename vendor/golang.org/x/sys/unix/{asm_darwin_386.s => asm_bsd_386.s} (72%)
 rename vendor/golang.org/x/sys/unix/{asm_openbsd_amd64.s => asm_bsd_amd64.s} (72%)
 rename vendor/golang.org/x/sys/unix/{asm_netbsd_arm.s => asm_bsd_arm.s} (76%)
 rename vendor/golang.org/x/sys/unix/{asm_netbsd_amd64.s => asm_bsd_arm64.s} (75%)
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_loong64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_zos_s390x.s
 create mode 100644 vendor/golang.org/x/sys/unix/dev_zos.go
 create mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go
 create mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go
 create mode 100644 vendor/golang.org/x/sys/unix/ifreq_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/ioctl_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/ioctl_zos.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_alarm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
 create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix.go
 create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix_other.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
 create mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go
 create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go

diff --git a/go.mod b/go.mod
index 75ac6a9..e4bfbb5 100644
--- a/go.mod
+++ b/go.mod
@@ -3,18 +3,20 @@ module git.autistici.org/ale/liber
 go 1.14
 
 require (
-	github.com/RoaringBitmap/roaring v0.5.5 // indirect
-	github.com/blevesearch/bleve/v2 v2.0.3
+	github.com/blevesearch/bleve/v2 v2.3.6
 	github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a // indirect
+	github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect
 	github.com/golang/protobuf v1.4.3 // indirect
 	github.com/golang/snappy v0.0.2 // indirect
 	github.com/google/subcommands v1.2.0
+	github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 // indirect
 	github.com/gorilla/mux v1.8.0
+	github.com/jtolds/gls v4.20.0+incompatible // indirect
 	github.com/meskio/epubgo v0.0.0-20160213181628-90dd5d78197f
 	github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
 	github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285
 	github.com/tinylib/msgp v1.1.5 // indirect
 	github.com/willf/bitset v1.1.11 // indirect
-	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect
+	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
 	google.golang.org/protobuf v1.25.0 // indirect
 )
diff --git a/go.sum b/go.sum
index af504d7..53e539e 100644
--- a/go.sum
+++ b/go.sum
@@ -4,51 +4,83 @@ github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhIN
 github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
 github.com/RoaringBitmap/roaring v0.5.5 h1:naNqvO1mNnghk2UvcsqnzHDBn9DRbCIRy94GmDTRVTQ=
 github.com/RoaringBitmap/roaring v0.5.5/go.mod h1:puNo5VdzwbaIQxSiDIwfXl4Hnc+fbovcX4IW/dSTtUk=
+github.com/RoaringBitmap/roaring v0.9.4 h1:ckvZSX5gwCRaJYBNe7syNawCU5oruY9gQmjXlp4riwo=
+github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/blevesearch/bleve/v2 v2.0.1 h1:v1eV5K+/lndsjnykeVcuU9J4cJnjKLUKSwxXFxZsLuY=
 github.com/blevesearch/bleve/v2 v2.0.1/go.mod h1:OBP2Pktqik8vEiUlGhuWjYx7KiO4zD542+DHqICwM5w=
 github.com/blevesearch/bleve/v2 v2.0.2 h1:D93VfhOiR6wALovgjsK5XNPeDRrZQlUEIq4YWFeaiTw=
 github.com/blevesearch/bleve/v2 v2.0.2/go.mod h1:ip+4iafiEq2gCY5rJXe87bT6LkF/OJMCjQEYIfTBfW8=
 github.com/blevesearch/bleve/v2 v2.0.3 h1:mDrwrsRIA4PDYkfUNjoh5zGECvquuJIA3MJU5ivaO8E=
 github.com/blevesearch/bleve/v2 v2.0.3/go.mod h1:ip+4iafiEq2gCY5rJXe87bT6LkF/OJMCjQEYIfTBfW8=
+github.com/blevesearch/bleve/v2 v2.3.6 h1:NlntUHcV5CSWIhpugx4d/BRMGCiaoI8ZZXrXlahzNq4=
+github.com/blevesearch/bleve/v2 v2.3.6/go.mod h1:JM2legf1cKVkdV8Ehu7msKIOKC0McSw0Q16Fmv9vsW4=
 github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU=
 github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4=
+github.com/blevesearch/bleve_index_api v1.0.5 h1:Lc986kpC4Z0/n1g3gg8ul7H+lxgOQPcXb9SxvQGu+tw=
+github.com/blevesearch/bleve_index_api v1.0.5/go.mod h1:YXMDwaXFFXwncRS8UobWs7nvo0DmusriM1nztTlj1ms=
+github.com/blevesearch/geo v0.1.16 h1:unVaqUmlwprk56596OQRkGjtq1VZ8XFWSARj+h2cIBY=
+github.com/blevesearch/geo v0.1.16/go.mod h1:a1OlySNE+oDQ5qY0vJGYNoLIsMpbKbx8dnmuRP8D7H0=
+github.com/blevesearch/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:9eJDeqxJ3E7WnLebQUlPD7ZjSce7AnDb9vjGmMCbD0A=
 github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo=
 github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=
+github.com/blevesearch/goleveldb v1.0.1 h1:iAtV2Cu5s0GD1lwUiekkFHe2gTMCCNVj2foPclDLIFI=
+github.com/blevesearch/goleveldb v1.0.1/go.mod h1:WrU8ltZbIp0wAoig/MHbrPCXSOLpe79nz5lv5nqfYrQ=
+github.com/blevesearch/gtreap v0.1.1 h1:2JWigFrzDMR+42WGIN/V2p0cUvn4UP3C4Q5nmaZGW8Y=
+github.com/blevesearch/gtreap v0.1.1/go.mod h1:QaQyDRAT51sotthUWAH4Sj08awFSSWzgYICSZ3w0tYk=
 github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0=
 github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA=
+github.com/blevesearch/mmap-go v1.0.4 h1:OVhDhT5B/M1HNPpYPBKIEJaD0F3Si+CrEKULGCDPWmc=
+github.com/blevesearch/mmap-go v1.0.4/go.mod h1:EWmEAOmdAS9z/pi/+Toxu99DnsbhG1TIxUoRmJw/pSs=
 github.com/blevesearch/scorch_segment_api v1.0.0 h1:BUkCPWDg2gimTEyVDXf85I2buqqt4lh28uaVMiJsIYk=
 github.com/blevesearch/scorch_segment_api v1.0.0/go.mod h1:KgRYmlfYC27NeM6cXOHx8LBgq7jn0atpV8mVWoBKBng=
 github.com/blevesearch/scorch_segment_api/v2 v2.0.1 h1:fd+hPtZ8GsbqPK1HslGp7Vhoik4arZteA/IsCEgOisw=
 github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU=
+github.com/blevesearch/scorch_segment_api/v2 v2.1.4 h1:LmGmo5twU3gV+natJbKmOktS9eMhokPGKWuR+jX84vk=
+github.com/blevesearch/scorch_segment_api/v2 v2.1.4/go.mod h1:PgVnbbg/t1UkgezPDu8EHLi1BHQ17xUwsFdU6NnOYS0=
 github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac=
 github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ=
+github.com/blevesearch/snowball v0.6.1/go.mod h1:ZF0IBg5vgpeoUhnMza2v0A/z8m1cWPlwhke08LpNusg=
 github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
 github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
 github.com/blevesearch/upsidedown_store_api v1.0.1 h1:1SYRwyoFLwG3sj0ed89RLtM15amfX2pXlYbFOnF8zNU=
 github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q=
 github.com/blevesearch/vellum v1.0.3 h1:U86G41A7CtXNzzpIJHM8lSTUqz1Mp8U870TkcdCzZc8=
 github.com/blevesearch/vellum v1.0.3/go.mod h1:2u5ax02KeDuNWu4/C+hVQMD6uLN4txH1JbtpaDNLJRo=
+github.com/blevesearch/vellum v1.0.9 h1:PL+NWVk3dDGPCV0hoDu9XLLJgqU4E5s/dOeEJByQ2uQ=
+github.com/blevesearch/vellum v1.0.9/go.mod h1:ul1oT0FhSMDIExNjIxHqJoGpVrBpKCdgDQNxfqgJt7k=
 github.com/blevesearch/zapx/v11 v11.1.10 h1:8Eo3rXiHsVSP9Sk+4StrrwLrj9vyulhMVPmxTf8ZuDg=
 github.com/blevesearch/zapx/v11 v11.1.10/go.mod h1:DTjbcBqrr/Uo82UBilDC8lEew42gN/OcIyiTNFtSijc=
 github.com/blevesearch/zapx/v11 v11.2.0 h1:GBkCJYsyj3eIU4+aiLPxoMz1PYvDbQZl/oXHIBZIP60=
 github.com/blevesearch/zapx/v11 v11.2.0/go.mod h1:gN/a0alGw1FZt/YGTo1G6Z6XpDkeOfujX5exY9sCQQM=
+github.com/blevesearch/zapx/v11 v11.3.7 h1:Y6yIAF/DVPiqZUA/jNgSLXmqewfzwHzuwfKyfdG+Xaw=
+github.com/blevesearch/zapx/v11 v11.3.7/go.mod h1:Xk9Z69AoAWIOvWudNDMlxJDqSYGf90LS0EfnaAIvXCA=
 github.com/blevesearch/zapx/v12 v12.1.10 h1:sqR+/0Z4dSTovApRqLA1HnilMtQer7a4UvPrNmPzlTM=
 github.com/blevesearch/zapx/v12 v12.1.10/go.mod
h1:14NmKnPrnKAIyiEJM566k/Jk+FQpuiflT5d3uaaK3MI= github.com/blevesearch/zapx/v12 v12.2.0 h1:dyRcSoZVO1jktL4UpGkCEF1AYa3xhKPirh4/N+Va+Ww= github.com/blevesearch/zapx/v12 v12.2.0/go.mod h1:fdjwvCwWWwJW/EYTYGtAp3gBA0geCYGLcVTtJEZnY6A= +github.com/blevesearch/zapx/v12 v12.3.7 h1:DfQ6rsmZfEK4PzzJJRXjiM6AObG02+HWvprlXQ1Y7eI= +github.com/blevesearch/zapx/v12 v12.3.7/go.mod h1:SgEtYIBGvM0mgIBn2/tQE/5SdrPXaJUaT/kVqpAPxm0= github.com/blevesearch/zapx/v13 v13.1.10 h1:zCneEVRJDXwtDfSwh+33Dxguliv192vCK283zdGH4Sw= github.com/blevesearch/zapx/v13 v13.1.10/go.mod h1:YsVY6YGpTEAlJOMjdL7EsdBLvjWd8kPa2gwJDNpqLJo= github.com/blevesearch/zapx/v13 v13.2.0 h1:mUqbaqQABp8nBE4t4q2qMyHCCq4sykoV8r7aJk4ih3s= github.com/blevesearch/zapx/v13 v13.2.0/go.mod h1:o5rAy/lRS5JpAbITdrOHBS/TugWYbkcYZTz6VfEinAQ= +github.com/blevesearch/zapx/v13 v13.3.7 h1:igIQg5eKmjw168I7av0Vtwedf7kHnQro/M+ubM4d2l8= +github.com/blevesearch/zapx/v13 v13.3.7/go.mod h1:yyrB4kJ0OT75UPZwT/zS+Ru0/jYKorCOOSY5dBzAy+s= github.com/blevesearch/zapx/v14 v14.1.10 h1:nD0vw2jxKogJFfA5WyoS4wNwZlVby3Aq8aW7CZi6YIw= github.com/blevesearch/zapx/v14 v14.1.10/go.mod h1:hsULl5eJSxs5NEfBsmeT9qrqdCP+/ecpVZKt60M4V64= github.com/blevesearch/zapx/v14 v14.2.0 h1:UsfRqvM9RJxKNKrkR1U7aYc1cv9MWx719fsAjbF6joI= github.com/blevesearch/zapx/v14 v14.2.0/go.mod h1:GNgZusc1p4ot040cBQMRGEZobvwjCquiEKYh1xLFK9g= +github.com/blevesearch/zapx/v14 v14.3.7 h1:gfe+fbWslDWP/evHLtp/GOvmNM3sw1BbqD7LhycBX20= +github.com/blevesearch/zapx/v14 v14.3.7/go.mod h1:9J/RbOkqZ1KSjmkOes03AkETX7hrXT0sFMpWH4ewC4w= github.com/blevesearch/zapx/v15 v15.1.10 h1:kZR3b9jO9l6s2B5UHI+1N1llLzJ4nYikkXQTMrDl1vQ= github.com/blevesearch/zapx/v15 v15.1.10/go.mod h1:4ypq25bwtSQKzwEF1UERyIhmGTbMT3brY/n4NC5gRnM= github.com/blevesearch/zapx/v15 v15.2.0 h1:ZpibwcrrOaeslkOw3sJ7npP7KDgRHI/DkACjKTqFwyM= github.com/blevesearch/zapx/v15 v15.2.0/go.mod h1:MmQceLpWfME4n1WrBFIwplhWmaQbQqLQARpaKUEOs/A= +github.com/blevesearch/zapx/v15 v15.3.8 h1:q4uMngBHzL1IIhRc8AJUEkj6dGOE3u1l3phLu7hq8uk= +github.com/blevesearch/zapx/v15 v15.3.8/go.mod h1:m7Y6m8soYUvS7MjN9eKlz1xrLCcmqfFadmu7GhWIrLY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -56,9 +88,11 @@ github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8Nz github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= +github.com/couchbase/moss v0.2.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= github.com/couchbase/vellum v1.0.2 h1:BrbP0NKiyDdndMPec8Jjhy0U47CZ0Lgx3xUC2r9rZqw= github.com/couchbase/vellum v1.0.2/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -72,6 +106,8 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a h1:FQqo 
github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo= +github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -96,6 +132,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= @@ -108,6 +145,8 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/json-iterator/go v0.0.0-20171115153421-f7279a603ede h1:YrgBGwxMRK0Vq0WSCWFaZUnTsrA/PZE/xs1QZh+/edg= +github.com/json-iterator/go v0.0.0-20171115153421-f7279a603ede/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw= @@ -138,11 +177,14 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/steveyen/gtreap v0.1.0 h1:CjhzTa274PyJLJuMZwIzCO1PfC00oRa8d1Kc78bFXJM= github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= @@ -150,6 +192,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 h1:uSDYjYejelKyceA6DiCsngFof9jAyeaSyX9XC5a1a7Q= @@ -207,9 +250,13 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -253,5 +300,7 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/RoaringBitmap/roaring/.gitignore b/vendor/github.com/RoaringBitmap/roaring/.gitignore index b7943ab..851f323 100644 --- a/vendor/github.com/RoaringBitmap/roaring/.gitignore +++ b/vendor/github.com/RoaringBitmap/roaring/.gitignore @@ -3,4 +3,3 @@ roaring-fuzz.zip workdir coverage.out testdata/all3.classic -testdata/all3.msgp.snappy diff --git 
a/vendor/github.com/RoaringBitmap/roaring/.travis.yml b/vendor/github.com/RoaringBitmap/roaring/.travis.yml index 12266d7..0a4c4e9 100644 --- a/vendor/github.com/RoaringBitmap/roaring/.travis.yml +++ b/vendor/github.com/RoaringBitmap/roaring/.travis.yml @@ -17,7 +17,7 @@ branches: only: - master script: -- goveralls -v -service travis-ci -ignore arraycontainer_gen.go,bitmapcontainer_gen.go,rle16_gen.go,rle_gen.go,roaringarray_gen.go,rle.go || go test +- goveralls -v -service travis-ci -ignore rle16_gen.go,rle_gen.go,rle.go || go test - go test -race -run TestConcurrent* - go build -tags appengine - go test -tags appengine diff --git a/vendor/github.com/RoaringBitmap/roaring/Makefile b/vendor/github.com/RoaringBitmap/roaring/Makefile index 906bd72..0a4f9f0 100644 --- a/vendor/github.com/RoaringBitmap/roaring/Makefile +++ b/vendor/github.com/RoaringBitmap/roaring/Makefile @@ -64,7 +64,7 @@ qa: fmtcheck test vet lint # Get the dependencies deps: GOPATH=$(GOPATH) go get github.com/stretchr/testify - GOPATH=$(GOPATH) go get github.com/willf/bitset + GOPATH=$(GOPATH) go get github.com/bits-and-blooms/bitset GOPATH=$(GOPATH) go get github.com/golang/lint/golint GOPATH=$(GOPATH) go get github.com/mschoch/smat GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz @@ -97,10 +97,6 @@ nuke: rm -rf ./target GOPATH=$(GOPATH) go clean -i ./... - -ser: - go generate - cover: go test -coverprofile=coverage.out go tool cover -html=coverage.out diff --git a/vendor/github.com/RoaringBitmap/roaring/README.md b/vendor/github.com/RoaringBitmap/roaring/README.md index 00d2351..2a7a129 100644 --- a/vendor/github.com/RoaringBitmap/roaring/README.md +++ b/vendor/github.com/RoaringBitmap/roaring/README.md @@ -1,4 +1,4 @@ -roaring [![Build Status](https://travis-ci.org/RoaringBitmap/roaring.png)](https://travis-ci.org/RoaringBitmap/roaring) [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring) [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring/roaring64?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring/roaring64) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring) +roaring [![Build Status](https://travis-ci.org/RoaringBitmap/roaring.png)](https://travis-ci.org/RoaringBitmap/roaring) [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring/roaring64?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring/roaring64) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring) [![Build Status](https://cloud.drone.io/api/badges/RoaringBitmap/roaring/status.svg)](https://cloud.drone.io/RoaringBitmap/roaring) ![Go-CI](https://github.com/RoaringBitmap/roaring/workflows/Go-CI/badge.svg) ![Go-ARM-CI](https://github.com/RoaringBitmap/roaring/workflows/Go-ARM-CI/badge.svg) @@ -84,7 +84,7 @@ When the bitset approach is applicable, it can be orders of magnitude faster than other possible implementation of a set (e.g., as a hash set) while using several times less memory. -However, a bitset, even a compressed one is not always applicable. For example, if the +However, a bitset, even a compressed one is not always applicable. For example, if you have 1000 random-looking integers, then a simple array might be the best representation. We refer to this case as the "sparse" scenario. 
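(Background for the dense-versus-sparse discussion in the README hunk above: the trade-off is observable through roaring's public Go API. The sketch below is illustrative only, with invented values, and is not part of the vendored change:

    package main

    import (
    	"fmt"

    	"github.com/RoaringBitmap/roaring"
    )

    func main() {
    	// Dense case: one long run of consecutive values compresses extremely well.
    	dense := roaring.New()
    	dense.AddRange(0, 1000000) // adds the half-open range [0, 1000000)
    	dense.RunOptimize()        // lets run-length containers replace bitmap containers

    	// Sparse case: a few scattered values stay in small sorted-array containers.
    	sparse := roaring.BitmapOf(3, 570, 23171, 9999999)

    	fmt.Println(dense.GetCardinality(), dense.GetSerializedSizeInBytes())
    	fmt.Println(sparse.GetCardinality(), sparse.GetSerializedSizeInBytes())
    }

After RunOptimize the dense bitmap serializes to a handful of bytes per run, while the sparse one costs roughly two bytes per element, which is the "simple array" representation the README refers to.)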
@@ -158,7 +158,7 @@ http://arxiv.org/abs/1402.6407 This paper used data from http://lemire.me/data/r Dependencies are fetched automatically by giving the `-t` flag to `go get`. they include - - github.com/willf/bitset + - github.com/bits-and-blooms/bitset - github.com/mschoch/smat - github.com/glycerine/go-unsnap-stream - github.com/philhofer/fwd @@ -384,12 +384,14 @@ You can help us test further the library with fuzzy testing: go get github.com/dvyukov/go-fuzz/go-fuzz-build go test -tags=gofuzz -run=TestGenerateSmatCorpus go-fuzz-build github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 + go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 -func FuzzSmat Let it run, and if the # of crashers is > 0, check out the reports in the workdir where you should be able to find the panic goroutine stack traces. +You may also replace `-func FuzzSmat` by `-func FuzzSerializationBuffer` or `-func FuzzSerializationStream`. + ### Alternative in Go There is a Go version wrapping the C/C++ implementation https://github.com/RoaringBitmap/gocroaring diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go index f8bb29b..80b7eec 100644 --- a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go @@ -4,8 +4,6 @@ import ( "fmt" ) -//go:generate msgp -unexported - type arrayContainer struct { content []uint16 } @@ -18,10 +16,11 @@ func (ac *arrayContainer) String() string { return s + "}" } -func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) { +func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) int { for k := 0; k < len(ac.content); k++ { x[k+i] = uint32(ac.content[k]) | mask } + return i + len(ac.content) } func (ac *arrayContainer) iterate(cb func(x uint16) bool) bool { @@ -396,11 +395,19 @@ func (ac *arrayContainer) iorBitmap(bc2 *bitmapContainer) container { } func (ac *arrayContainer) iorRun16(rc *runContainer16) container { - bc1 := ac.toBitmapContainer() - bc2 := rc.toBitmapContainer() - bc1.iorBitmap(bc2) - *ac = *newArrayContainerFromBitmap(bc1) - return ac + runCardinality := rc.getCardinality() + // heuristic for if the container should maybe be an + // array container. + if runCardinality < ac.getCardinality() && + runCardinality+ac.getCardinality() < arrayDefaultMaxSize { + var result container + result = ac + for _, run := range rc.iv { + result = result.iaddRange(int(run.start), int(run.start)+int(run.length)+1) + } + return result + } + return rc.orArray(ac) } func (ac *arrayContainer) lazyIOR(a container) container { @@ -485,7 +492,7 @@ func (ac *arrayContainer) orArrayCardinality(value2 *arrayContainer) int { func (ac *arrayContainer) lazyorArray(value2 *arrayContainer) container { value1 := ac maxPossibleCardinality := value1.getCardinality() + value2.getCardinality() - if maxPossibleCardinality > arrayLazyLowerBound { // it could be a bitmap!^M + if maxPossibleCardinality > arrayLazyLowerBound { // it could be a bitmap! 
bc := newBitmapContainer() for k := 0; k < len(value2.content); k++ { v := value2.content[k] @@ -845,6 +852,10 @@ func (ac *arrayContainer) getCardinality() int { return len(ac.content) } +func (ac *arrayContainer) isEmpty() bool { + return len(ac.content) == 0 +} + func (ac *arrayContainer) rank(x uint16) int { answer := binarySearch(ac.content, x) if answer >= 0 { @@ -884,7 +895,7 @@ func (ac *arrayContainer) resetTo(a container) { x.fillArray(ac.content) case *runContainer16: - card := int(x.cardinality()) + card := int(x.getCardinality()) ac.realloc(card) cur := 0 for _, r := range x.iv { @@ -958,10 +969,10 @@ func (ac *arrayContainer) numberOfRuns() (nr int) { runlen++ } else { if cur < prev { - panic("then fundamental arrayContainer assumption of sorted ac.content was broken") + panic("the fundamental arrayContainer assumption of sorted ac.content was broken") } if cur == prev { - panic("then fundamental arrayContainer assumption of deduplicated content was broken") + panic("the fundamental arrayContainer assumption of deduplicated content was broken") } else { nr++ runlen = 0 diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go deleted file mode 100644 index 6ee670e..0000000 --- a/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go +++ /dev/null @@ -1,134 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *arrayContainer) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbzg uint32 - zbzg, err = dc.ReadMapHeader() - if err != nil { - return - } - for zbzg > 0 { - zbzg-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "content": - var zbai uint32 - zbai, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.content) >= int(zbai) { - z.content = (z.content)[:zbai] - } else { - z.content = make([]uint16, zbai) - } - for zxvk := range z.content { - z.content[zxvk], err = dc.ReadUint16() - if err != nil { - return - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *arrayContainer) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "content" - err = en.Append(0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.content))) - if err != nil { - return - } - for zxvk := range z.content { - err = en.WriteUint16(z.content[zxvk]) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *arrayContainer) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "content" - o = append(o, 0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74) - o = msgp.AppendArrayHeader(o, uint32(len(z.content))) - for zxvk := range z.content { - o = msgp.AppendUint16(o, z.content[zxvk]) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *arrayContainer) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zcmr uint32 - zcmr, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zcmr > 0 { - zcmr-- - field, bts, err = 
msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "content": - var zajw uint32 - zajw, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.content) >= int(zajw) { - z.content = (z.content)[:zajw] - } else { - z.content = make([]uint16, zajw) - } - for zxvk := range z.content { - z.content[zxvk], bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *arrayContainer) Msgsize() (s int) { - s = 1 + 8 + msgp.ArrayHeaderSize + (len(z.content) * (msgp.Uint16Size)) - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go index e09b091..f8367da 100644 --- a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go @@ -5,8 +5,6 @@ import ( "unsafe" ) -//go:generate msgp -unexported - type bitmapContainer struct { cardinality int bitmap []uint64 @@ -115,7 +113,7 @@ type bitmapContainerShortIterator struct { func (bcsi *bitmapContainerShortIterator) next() uint16 { j := bcsi.i - bcsi.i = bcsi.ptr.NextSetBit(bcsi.i + 1) + bcsi.i = bcsi.ptr.NextSetBit(uint(bcsi.i) + 1) return uint16(j) } func (bcsi *bitmapContainerShortIterator) hasNext() bool { @@ -128,7 +126,7 @@ func (bcsi *bitmapContainerShortIterator) peekNext() uint16 { func (bcsi *bitmapContainerShortIterator) advanceIfNeeded(minval uint16) { if bcsi.hasNext() && bcsi.peekNext() < minval { - bcsi.i = bcsi.ptr.NextSetBit(int(minval)) + bcsi.i = bcsi.ptr.NextSetBit(uint(minval)) } } @@ -266,7 +264,7 @@ func bitmapEquals(a, b []uint64) bool { return true } -func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) { +func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) int { // TODO: should be written as optimized assembly pos := i base := mask @@ -280,6 +278,7 @@ func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask ui } base += 64 } + return pos } func (bc *bitmapContainer) equals(o container) bool { @@ -351,6 +350,11 @@ func (bc *bitmapContainer) getCardinality() int { return bc.cardinality } + +func (bc *bitmapContainer) isEmpty() bool { + return bc.cardinality == 0 +} + func (bc *bitmapContainer) clone() container { ptr := bitmapContainer{bc.cardinality, make([]uint64, len(bc.bitmap))} copy(ptr.bitmap, bc.bitmap[:]) @@ -1009,20 +1013,23 @@ func (bc *bitmapContainer) fillArray(container []uint16) { } } -func (bc *bitmapContainer) NextSetBit(i int) int { - x := i / 64 - if x >= len(bc.bitmap) { +func (bc *bitmapContainer) NextSetBit(i uint) int { + var ( + x = i / 64 + length = uint(len(bc.bitmap)) + ) + if x >= length { return -1 } w := bc.bitmap[x] w = w >> uint(i%64) if w != 0 { - return i + countTrailingZeros(w) + return int(i) + countTrailingZeros(w) } x++ - for ; x < len(bc.bitmap); x++ { + for ; x < length; x++ { if bc.bitmap[x] != 0 { - return (x * 64) + countTrailingZeros(bc.bitmap[x]) + return int(x*64) + countTrailingZeros(bc.bitmap[x]) } } return -1 @@ -1131,16 +1138,12 @@ func (bc *bitmapContainer) addOffset(x uint16) []container { low.bitmap[b] = bc.bitmap[0] << i for k := uint32(1); k < end; k++ { newval := bc.bitmap[k] << i - if newval == 0 { - newval = bc.bitmap[k-1] >> (64 - i) 
- } + newval |= bc.bitmap[k-1] >> (64 - i) low.bitmap[b+k] = newval } for k := end; k < 1024; k++ { newval := bc.bitmap[k] << i - if newval == 0 { - newval = bc.bitmap[k-1] >> (64 - i) - } + newval |= bc.bitmap[k-1] >> (64 - i) high.bitmap[k-end] = newval } high.bitmap[b] = bc.bitmap[1023] >> (64 - i) diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go deleted file mode 100644 index 9b5a465..0000000 --- a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go +++ /dev/null @@ -1,415 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *bitmapContainer) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbzg uint32 - zbzg, err = dc.ReadMapHeader() - if err != nil { - return - } - for zbzg > 0 { - zbzg-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.cardinality, err = dc.ReadInt() - if err != nil { - return - } - case "bitmap": - var zbai uint32 - zbai, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.bitmap) >= int(zbai) { - z.bitmap = (z.bitmap)[:zbai] - } else { - z.bitmap = make([]uint64, zbai) - } - for zxvk := range z.bitmap { - z.bitmap[zxvk], err = dc.ReadUint64() - if err != nil { - return - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *bitmapContainer) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "cardinality" - err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - if err != nil { - return err - } - err = en.WriteInt(z.cardinality) - if err != nil { - return - } - // write "bitmap" - err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.bitmap))) - if err != nil { - return - } - for zxvk := range z.bitmap { - err = en.WriteUint64(z.bitmap[zxvk]) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *bitmapContainer) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "cardinality" - o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - o = msgp.AppendInt(o, z.cardinality) - // string "bitmap" - o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - o = msgp.AppendArrayHeader(o, uint32(len(z.bitmap))) - for zxvk := range z.bitmap { - o = msgp.AppendUint64(o, z.bitmap[zxvk]) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *bitmapContainer) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zcmr uint32 - zcmr, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zcmr > 0 { - zcmr-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.cardinality, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - return - } - case "bitmap": - var zajw uint32 - zajw, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.bitmap) >= int(zajw) { - z.bitmap = (z.bitmap)[:zajw] - 
} else { - z.bitmap = make([]uint64, zajw) - } - for zxvk := range z.bitmap { - z.bitmap[zxvk], bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *bitmapContainer) Msgsize() (s int) { - s = 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.bitmap) * (msgp.Uint64Size)) - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *bitmapContainerShortIterator) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zhct uint32 - zhct, err = dc.ReadMapHeader() - if err != nil { - return - } - for zhct > 0 { - zhct-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "ptr": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - return - } - z.ptr = nil - } else { - if z.ptr == nil { - z.ptr = new(bitmapContainer) - } - var zcua uint32 - zcua, err = dc.ReadMapHeader() - if err != nil { - return - } - for zcua > 0 { - zcua-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.ptr.cardinality, err = dc.ReadInt() - if err != nil { - return - } - case "bitmap": - var zxhx uint32 - zxhx, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.ptr.bitmap) >= int(zxhx) { - z.ptr.bitmap = (z.ptr.bitmap)[:zxhx] - } else { - z.ptr.bitmap = make([]uint64, zxhx) - } - for zwht := range z.ptr.bitmap { - z.ptr.bitmap[zwht], err = dc.ReadUint64() - if err != nil { - return - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "i": - z.i, err = dc.ReadInt() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *bitmapContainerShortIterator) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "ptr" - err = en.Append(0x82, 0xa3, 0x70, 0x74, 0x72) - if err != nil { - return err - } - if z.ptr == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - // map header, size 2 - // write "cardinality" - err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - if err != nil { - return err - } - err = en.WriteInt(z.ptr.cardinality) - if err != nil { - return - } - // write "bitmap" - err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.ptr.bitmap))) - if err != nil { - return - } - for zwht := range z.ptr.bitmap { - err = en.WriteUint64(z.ptr.bitmap[zwht]) - if err != nil { - return - } - } - } - // write "i" - err = en.Append(0xa1, 0x69) - if err != nil { - return err - } - err = en.WriteInt(z.i) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *bitmapContainerShortIterator) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "ptr" - o = append(o, 0x82, 0xa3, 0x70, 0x74, 0x72) - if z.ptr == nil { - o = msgp.AppendNil(o) - } else { - // map header, size 2 - // string "cardinality" - o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - o = msgp.AppendInt(o, z.ptr.cardinality) - // string "bitmap" - o = append(o, 0xa6, 
0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - o = msgp.AppendArrayHeader(o, uint32(len(z.ptr.bitmap))) - for zwht := range z.ptr.bitmap { - o = msgp.AppendUint64(o, z.ptr.bitmap[zwht]) - } - } - // string "i" - o = append(o, 0xa1, 0x69) - o = msgp.AppendInt(o, z.i) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *bitmapContainerShortIterator) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zlqf uint32 - zlqf, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zlqf > 0 { - zlqf-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "ptr": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.ptr = nil - } else { - if z.ptr == nil { - z.ptr = new(bitmapContainer) - } - var zdaf uint32 - zdaf, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zdaf > 0 { - zdaf-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.ptr.cardinality, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - return - } - case "bitmap": - var zpks uint32 - zpks, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.ptr.bitmap) >= int(zpks) { - z.ptr.bitmap = (z.ptr.bitmap)[:zpks] - } else { - z.ptr.bitmap = make([]uint64, zpks) - } - for zwht := range z.ptr.bitmap { - z.ptr.bitmap[zwht], bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "i": - z.i, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *bitmapContainerShortIterator) Msgsize() (s int) { - s = 1 + 4 - if z.ptr == nil { - s += msgp.NilSize - } else { - s += 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.ptr.bitmap) * (msgp.Uint64Size)) - } - s += 2 + msgp.IntSize - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/byte_input.go b/vendor/github.com/RoaringBitmap/roaring/byte_input.go deleted file mode 100644 index f7a98a1..0000000 --- a/vendor/github.com/RoaringBitmap/roaring/byte_input.go +++ /dev/null @@ -1,161 +0,0 @@ -package roaring - -import ( - "encoding/binary" - "io" -) - -type byteInput interface { - // next returns a slice containing the next n bytes from the buffer, - // advancing the buffer as if the bytes had been returned by Read. 
- next(n int) ([]byte, error) - // readUInt32 reads uint32 with LittleEndian order - readUInt32() (uint32, error) - // readUInt16 reads uint16 with LittleEndian order - readUInt16() (uint16, error) - // getReadBytes returns read bytes - getReadBytes() int64 - // skipBytes skips exactly n bytes - skipBytes(n int) error -} - -func newByteInputFromReader(reader io.Reader) byteInput { - return &byteInputAdapter{ - r: reader, - readBytes: 0, - } -} - -func newByteInput(buf []byte) byteInput { - return &byteBuffer{ - buf: buf, - off: 0, - } -} - -type byteBuffer struct { - buf []byte - off int -} - -// next returns a slice containing the next n bytes from the reader -// If there are fewer bytes than the given n, io.ErrUnexpectedEOF will be returned -func (b *byteBuffer) next(n int) ([]byte, error) { - m := len(b.buf) - b.off - - if n > m { - return nil, io.ErrUnexpectedEOF - } - - data := b.buf[b.off : b.off+n] - b.off += n - - return data, nil -} - -// readUInt32 reads uint32 with LittleEndian order -func (b *byteBuffer) readUInt32() (uint32, error) { - if len(b.buf)-b.off < 4 { - return 0, io.ErrUnexpectedEOF - } - - v := binary.LittleEndian.Uint32(b.buf[b.off:]) - b.off += 4 - - return v, nil -} - -// readUInt16 reads uint16 with LittleEndian order -func (b *byteBuffer) readUInt16() (uint16, error) { - if len(b.buf)-b.off < 2 { - return 0, io.ErrUnexpectedEOF - } - - v := binary.LittleEndian.Uint16(b.buf[b.off:]) - b.off += 2 - - return v, nil -} - -// getReadBytes returns read bytes -func (b *byteBuffer) getReadBytes() int64 { - return int64(b.off) -} - -// skipBytes skips exactly n bytes -func (b *byteBuffer) skipBytes(n int) error { - m := len(b.buf) - b.off - - if n > m { - return io.ErrUnexpectedEOF - } - - b.off += n - - return nil -} - -// reset resets the given buffer with a new byte slice -func (b *byteBuffer) reset(buf []byte) { - b.buf = buf - b.off = 0 -} - -type byteInputAdapter struct { - r io.Reader - readBytes int -} - -// next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. 
-func (b *byteInputAdapter) next(n int) ([]byte, error) { - buf := make([]byte, n) - m, err := io.ReadAtLeast(b.r, buf, n) - b.readBytes += m - - if err != nil { - return nil, err - } - - return buf, nil -} - -// readUInt32 reads uint32 with LittleEndian order -func (b *byteInputAdapter) readUInt32() (uint32, error) { - buf, err := b.next(4) - - if err != nil { - return 0, err - } - - return binary.LittleEndian.Uint32(buf), nil -} - -// readUInt16 reads uint16 with LittleEndian order -func (b *byteInputAdapter) readUInt16() (uint16, error) { - buf, err := b.next(2) - - if err != nil { - return 0, err - } - - return binary.LittleEndian.Uint16(buf), nil -} - -// getReadBytes returns read bytes -func (b *byteInputAdapter) getReadBytes() int64 { - return int64(b.readBytes) -} - -// skipBytes skips exactly n bytes -func (b *byteInputAdapter) skipBytes(n int) error { - _, err := b.next(n) - - return err -} - -// reset resets the given buffer with a new stream -func (b *byteInputAdapter) reset(stream io.Reader) { - b.r = stream - b.readBytes = 0 -} diff --git a/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go b/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go index ae731b3..47bda71 100644 --- a/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go +++ b/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go @@ -33,15 +33,6 @@ main: s2 = x2.highlowcontainer.getKeyAtIndex(pos2) } else { c1 := x1.highlowcontainer.getContainerAtIndex(pos1) - switch t := c1.(type) { - case *arrayContainer: - c1 = t.toBitmapContainer() - case *runContainer16: - if !t.isFull() { - c1 = t.toBitmapContainer() - } - } - answer.highlowcontainer.appendContainer(s1, c1.lazyOR(x2.highlowcontainer.getContainerAtIndex(pos2)), false) pos1++ pos2++ @@ -89,18 +80,7 @@ main: } s2 = x2.highlowcontainer.getKeyAtIndex(pos2) } else { - c1 := x1.highlowcontainer.getContainerAtIndex(pos1) - switch t := c1.(type) { - case *arrayContainer: - c1 = t.toBitmapContainer() - case *runContainer16: - if !t.isFull() { - c1 = t.toBitmapContainer() - } - case *bitmapContainer: - c1 = x1.highlowcontainer.getWritableContainerAtIndex(pos1) - } - + c1 := x1.highlowcontainer.getWritableContainerAtIndex(pos1) x1.highlowcontainer.containers[pos1] = c1.lazyIOR(x2.highlowcontainer.getContainerAtIndex(pos2)) x1.highlowcontainer.needCopyOnWrite[pos1] = false pos1++ @@ -301,9 +281,6 @@ func (x1 *Bitmap) AndAny(bitmaps ...*Bitmap) { tmpBitmap = newBitmapContainer() } tmpBitmap.resetTo(keyContainers[0]) - for _, c := range keyContainers[1:] { - tmpBitmap.ior(c) - } ored = tmpBitmap } else { if tmpArray == nil { @@ -311,15 +288,15 @@ func (x1 *Bitmap) AndAny(bitmaps ...*Bitmap) { } tmpArray.realloc(maxPossibleOr) tmpArray.resetTo(keyContainers[0]) - for _, c := range keyContainers[1:] { - tmpArray.ior(c) - } ored = tmpArray } + for _, c := range keyContainers[1:] { + ored = ored.ior(c) + } } result := x1.highlowcontainer.getWritableContainerAtIndex(basePos).iand(ored) - if result.getCardinality() > 0 { + if !result.isEmpty() { x1.highlowcontainer.replaceKeyAndContainerAtIndex(intersections, baseKey, result, false) intersections++ } diff --git a/vendor/github.com/RoaringBitmap/roaring/go.mod b/vendor/github.com/RoaringBitmap/roaring/go.mod index 5e9db36..d87811d 100644 --- a/vendor/github.com/RoaringBitmap/roaring/go.mod +++ b/vendor/github.com/RoaringBitmap/roaring/go.mod @@ -3,16 +3,7 @@ module github.com/RoaringBitmap/roaring go 1.14 require ( - github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 - 
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect - github.com/golang/snappy v0.0.1 // indirect - github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 // indirect - github.com/jtolds/gls v4.20.0+incompatible // indirect - github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae - github.com/philhofer/fwd v1.0.0 // indirect + github.com/bits-and-blooms/bitset v1.2.0 + github.com/mschoch/smat v0.2.0 github.com/stretchr/testify v1.4.0 - github.com/tinylib/msgp v1.1.0 - github.com/willf/bitset v1.1.10 - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/tools v0.0.0-20200928182047-19e03678916f // indirect ) diff --git a/vendor/github.com/RoaringBitmap/roaring/go.sum b/vendor/github.com/RoaringBitmap/roaring/go.sum index c01900e..cd059d8 100644 --- a/vendor/github.com/RoaringBitmap/roaring/go.sum +++ b/vendor/github.com/RoaringBitmap/roaring/go.sum @@ -1,54 +1,14 @@ +github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= +github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= 
-github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200928182047-19e03678916f h1:VwGa2Wf+rHGIxvsssCkUNIyFv8jQY0VCBCNWtikoWq0= -golang.org/x/tools v0.0.0-20200928182047-19e03678916f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= diff --git a/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go b/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go new file mode 100644 index 0000000..3e5490a --- /dev/null +++ b/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go @@ -0,0 +1,166 @@ +package internal + +import ( + "encoding/binary" + "io" +) + +// ByteInput typed 
interface around io.Reader or raw bytes +type ByteInput interface { + // Next returns a slice containing the next n bytes from the buffer, + // advancing the buffer as if the bytes had been returned by Read. + Next(n int) ([]byte, error) + // ReadUInt32 reads uint32 with LittleEndian order + ReadUInt32() (uint32, error) + // ReadUInt16 reads uint16 with LittleEndian order + ReadUInt16() (uint16, error) + // GetReadBytes returns read bytes + GetReadBytes() int64 + // SkipBytes skips exactly n bytes + SkipBytes(n int) error +} + +// NewByteInputFromReader creates reader wrapper +func NewByteInputFromReader(reader io.Reader) ByteInput { + return &ByteInputAdapter{ + r: reader, + readBytes: 0, + } +} + +// NewByteInput creates raw bytes wrapper +func NewByteInput(buf []byte) ByteInput { + return &ByteBuffer{ + buf: buf, + off: 0, + } +} + +// ByteBuffer raw bytes wrapper +type ByteBuffer struct { + buf []byte + off int +} + +// Next returns a slice containing the next n bytes from the reader +// If there are fewer bytes than the given n, io.ErrUnexpectedEOF will be returned +func (b *ByteBuffer) Next(n int) ([]byte, error) { + m := len(b.buf) - b.off + + if n > m { + return nil, io.ErrUnexpectedEOF + } + + data := b.buf[b.off : b.off+n] + b.off += n + + return data, nil +} + +// ReadUInt32 reads uint32 with LittleEndian order +func (b *ByteBuffer) ReadUInt32() (uint32, error) { + if len(b.buf)-b.off < 4 { + return 0, io.ErrUnexpectedEOF + } + + v := binary.LittleEndian.Uint32(b.buf[b.off:]) + b.off += 4 + + return v, nil +} + +// ReadUInt16 reads uint16 with LittleEndian order +func (b *ByteBuffer) ReadUInt16() (uint16, error) { + if len(b.buf)-b.off < 2 { + return 0, io.ErrUnexpectedEOF + } + + v := binary.LittleEndian.Uint16(b.buf[b.off:]) + b.off += 2 + + return v, nil +} + +// GetReadBytes returns read bytes +func (b *ByteBuffer) GetReadBytes() int64 { + return int64(b.off) +} + +// SkipBytes skips exactly n bytes +func (b *ByteBuffer) SkipBytes(n int) error { + m := len(b.buf) - b.off + + if n > m { + return io.ErrUnexpectedEOF + } + + b.off += n + + return nil +} + +// Reset resets the given buffer with a new byte slice +func (b *ByteBuffer) Reset(buf []byte) { + b.buf = buf + b.off = 0 +} + +// ByteInputAdapter reader wrapper +type ByteInputAdapter struct { + r io.Reader + readBytes int +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. 
+func (b *ByteInputAdapter) Next(n int) ([]byte, error) { + buf := make([]byte, n) + m, err := io.ReadAtLeast(b.r, buf, n) + b.readBytes += m + + if err != nil { + return nil, err + } + + return buf, nil +} + +// ReadUInt32 reads uint32 with LittleEndian order +func (b *ByteInputAdapter) ReadUInt32() (uint32, error) { + buf, err := b.Next(4) + + if err != nil { + return 0, err + } + + return binary.LittleEndian.Uint32(buf), nil +} + +// ReadUInt16 reads uint16 with LittleEndian order +func (b *ByteInputAdapter) ReadUInt16() (uint16, error) { + buf, err := b.Next(2) + + if err != nil { + return 0, err + } + + return binary.LittleEndian.Uint16(buf), nil +} + +// GetReadBytes returns read bytes +func (b *ByteInputAdapter) GetReadBytes() int64 { + return int64(b.readBytes) +} + +// SkipBytes skips exactly n bytes +func (b *ByteInputAdapter) SkipBytes(n int) error { + _, err := b.Next(n) + + return err +} + +// Reset resets the given buffer with a new stream +func (b *ByteInputAdapter) Reset(stream io.Reader) { + b.r = stream + b.readBytes = 0 +} diff --git a/vendor/github.com/RoaringBitmap/roaring/internal/pools.go b/vendor/github.com/RoaringBitmap/roaring/internal/pools.go new file mode 100644 index 0000000..d258356 --- /dev/null +++ b/vendor/github.com/RoaringBitmap/roaring/internal/pools.go @@ -0,0 +1,21 @@ +package internal + +import ( + "sync" +) + +var ( + // ByteInputAdapterPool shared pool + ByteInputAdapterPool = sync.Pool{ + New: func() interface{} { + return &ByteInputAdapter{} + }, + } + + // ByteBufferPool shared pool + ByteBufferPool = sync.Pool{ + New: func() interface{} { + return &ByteBuffer{} + }, + } +) diff --git a/vendor/github.com/RoaringBitmap/roaring/parallel.go b/vendor/github.com/RoaringBitmap/roaring/parallel.go index 2af1aed..9208e3e 100644 --- a/vendor/github.com/RoaringBitmap/roaring/parallel.go +++ b/vendor/github.com/RoaringBitmap/roaring/parallel.go @@ -166,7 +166,6 @@ func appenderRoutine(bitmapChan chan<- *Bitmap, resultChan <-chan keyedContainer make([]container, 0, expectedKeys), make([]bool, 0, expectedKeys), false, - nil, }, } for i := range keys { @@ -286,14 +285,14 @@ func ParAnd(parallelism int, bitmaps ...*Bitmap) *Bitmap { for input := range inputChan { c := input.containers[0].and(input.containers[1]) for _, next := range input.containers[2:] { - if c.getCardinality() == 0 { + if c.isEmpty() { break } c = c.iand(next) } // Send a nil explicitly if the result of the intersection is an empty container - if c.getCardinality() == 0 { + if c.isEmpty() { c = nil } @@ -355,10 +354,10 @@ func ParOr(parallelism int, bitmaps ...*Bitmap) *Bitmap { if lKey == MaxUint16 && hKey == 0 { return New() } else if len(bitmaps) == 1 { - return bitmaps[0] + return bitmaps[0].Clone() } - keyRange := hKey - lKey + 1 + keyRange := int(hKey) - int(lKey) + 1 if keyRange == 1 { // revert to FastOr. Since the key range is 0 // no container-level aggregation parallelism is achievable diff --git a/vendor/github.com/RoaringBitmap/roaring/roaring.go b/vendor/github.com/RoaringBitmap/roaring/roaring.go index 2afb395..53068e4 100644 --- a/vendor/github.com/RoaringBitmap/roaring/roaring.go +++ b/vendor/github.com/RoaringBitmap/roaring/roaring.go @@ -11,7 +11,8 @@ import ( "fmt" "io" "strconv" - "sync" + + "github.com/RoaringBitmap/roaring/internal" ) // Bitmap represents a compressed bitmap where you can add integers. 
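(Note on the new internal package and pools above: ReadFrom and FromBuffer now borrow a shared ByteInputAdapter or ByteBuffer from a sync.Pool, rebind it to the current input, and return it afterwards, instead of allocating a fresh adapter per deserialization. A simplified sketch of that borrow/reset/return idiom, with abbreviated names rather than the library's exact code:

    package demo

    import "sync"

    // buffer mirrors internal.ByteBuffer: a byte slice plus a read offset.
    type buffer struct {
    	buf []byte
    	off int
    }

    // reset rebinds the pooled object to a new input, clearing stale state.
    func (b *buffer) reset(data []byte) { b.buf, b.off = data, 0 }

    var bufferPool = sync.Pool{
    	New: func() interface{} { return &buffer{} },
    }

    func decode(data []byte) int {
    	b := bufferPool.Get().(*buffer) // borrow from the pool
    	b.reset(data)                   // bind to this call's input
    	n := len(b.buf) - b.off         // real code would decode containers here
    	bufferPool.Put(b)               // return for reuse by later calls
    	return n
    }

The parallel.go hunk above also makes ParOr return bitmaps[0].Clone() when given a single bitmap, so callers no longer alias the input, and computes keyRange in int so the uint16 subtraction hKey-lKey cannot overflow.)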
@@ -52,27 +53,19 @@ func (rb *Bitmap) ToBytes() ([]byte, error) {
 	return rb.highlowcontainer.toBytes()
 }
 
-// Deprecated: WriteToMsgpack writes a msgpack2/snappy-streaming compressed serialized
-// version of this bitmap to stream. The format is not
-// compatible with the WriteTo() format, and is
-// experimental: it may produce smaller on disk
-// footprint and/or be faster to read, depending
-// on your content. Currently only the Go roaring
-// implementation supports this format.
-func (rb *Bitmap) WriteToMsgpack(stream io.Writer) (int64, error) {
-	return 0, rb.highlowcontainer.writeToMsgpack(stream)
-}
-
 // ReadFrom reads a serialized version of this bitmap from stream.
 // The format is compatible with other RoaringBitmap
 // implementations (Java, C) and is documented here:
 // https://github.com/RoaringBitmap/RoaringFormatSpec
-func (rb *Bitmap) ReadFrom(reader io.Reader) (p int64, err error) {
-	stream := byteInputAdapterPool.Get().(*byteInputAdapter)
-	stream.reset(reader)
+// Since an io.Reader is a stream, it cannot be read twice.
+// The optional cookieHeader accepts the 4 bytes that roaring64.ReadFrom has already consumed from the stream.
+// It is not necessary to pass cookieHeader when calling roaring.ReadFrom to read roaring32 data directly.
+func (rb *Bitmap) ReadFrom(reader io.Reader, cookieHeader ...byte) (p int64, err error) {
+	stream := internal.ByteInputAdapterPool.Get().(*internal.ByteInputAdapter)
+	stream.Reset(reader)
 
-	p, err = rb.highlowcontainer.readFrom(stream)
-	byteInputAdapterPool.Put(stream)
+	p, err = rb.highlowcontainer.readFrom(stream, cookieHeader...)
+	internal.ByteInputAdapterPool.Put(stream)
 
 	return
 }
@@ -100,29 +93,15 @@ func (rb *Bitmap) ReadFrom(reader io.Reader) (p int64, err error) {
 // call CloneCopyOnWriteContainers on all such bitmaps.
 //
 func (rb *Bitmap) FromBuffer(buf []byte) (p int64, err error) {
-	stream := byteBufferPool.Get().(*byteBuffer)
-	stream.reset(buf)
+	stream := internal.ByteBufferPool.Get().(*internal.ByteBuffer)
+	stream.Reset(buf)
 
 	p, err = rb.highlowcontainer.readFrom(stream)
-	byteBufferPool.Put(stream)
+	internal.ByteBufferPool.Put(stream)
 
 	return
 }
 
-var (
-	byteBufferPool = sync.Pool{
-		New: func() interface{} {
-			return &byteBuffer{}
-		},
-	}
-
-	byteInputAdapterPool = sync.Pool{
-		New: func() interface{} {
-			return &byteInputAdapter{}
-		},
-	}
-)
-
 // RunOptimize attempts to further compress the runs of consecutive values found in the bitmap
 func (rb *Bitmap) RunOptimize() {
 	rb.highlowcontainer.runOptimize()
@@ -133,14 +112,6 @@ func (rb *Bitmap) HasRunCompression() bool {
 	return rb.highlowcontainer.hasRunCompression()
 }
 
-// Deprecated: ReadFromMsgpack reads a msgpack2/snappy-streaming serialized
-// version of this bitmap from stream. The format is
-// expected is that written by the WriteToMsgpack()
-// call; see additional notes there.
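The ReadFrom hunk above adds a variadic cookieHeader so that roaring64.ReadFrom can forward the 4 cookie bytes it has already consumed from the stream. A minimal sketch of both call shapes, assuming only the API visible in this patch plus roaring's long-standing BitmapOf and GetCardinality helpers:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	data, err := roaring.BitmapOf(1, 2, 1000000).ToBytes()
	if err != nil {
		panic(err)
	}

	// Plain 32-bit case: no cookieHeader is needed.
	rb := roaring.New()
	if _, err := rb.ReadFrom(bytes.NewReader(data)); err != nil {
		panic(err)
	}

	// roaring64-style case: the 4 cookie bytes have already been consumed
	// from the stream, so they are forwarded explicitly.
	r := bytes.NewReader(data)
	cookie := make([]byte, 4)
	if _, err := io.ReadFull(r, cookie); err != nil {
		panic(err)
	}
	rb2 := roaring.New()
	if _, err := rb2.ReadFrom(r, cookie...); err != nil {
		panic(err)
	}
	fmt.Println(rb.GetCardinality(), rb2.GetCardinality()) // 3 3
}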
-func (rb *Bitmap) ReadFromMsgpack(stream io.Reader) (int64, error) { - return 0, rb.highlowcontainer.readFromMsgpack(stream) -} - // MarshalBinary implements the encoding.BinaryMarshaler interface for the bitmap // (same as ToBytes) func (rb *Bitmap) MarshalBinary() ([]byte, error) { @@ -180,8 +151,7 @@ func (rb *Bitmap) ToArray() []uint32 { hs := uint32(rb.highlowcontainer.getKeyAtIndex(pos)) << 16 c := rb.highlowcontainer.getContainerAtIndex(pos) pos++ - c.fillLeastSignificant16bits(array, pos2, hs) - pos2 += c.getCardinality() + pos2 = c.fillLeastSignificant16bits(array, pos2, hs) } return array } @@ -571,7 +541,7 @@ func AddOffset64(x *Bitmap, offset int64) (answer *Bitmap) { c := x.highlowcontainer.getContainerAtIndex(pos) offsetted := c.addOffset(inOffset) - if offsetted[0].getCardinality() > 0 && (key >= 0 && key <= MaxUint16) { + if !offsetted[0].isEmpty() && (key >= 0 && key <= MaxUint16) { curSize := answer.highlowcontainer.size() lastkey := int32(0) @@ -588,7 +558,7 @@ func AddOffset64(x *Bitmap, offset int64) (answer *Bitmap) { } } - if offsetted[1].getCardinality() > 0 && ((key+1) >= 0 && (key+1) <= MaxUint16) { + if !offsetted[1].isEmpty() && ((key+1) >= 0 && (key+1) <= MaxUint16) { answer.highlowcontainer.appendContainer(uint16(key+1), offsetted[1], false) } } @@ -659,13 +629,13 @@ func (rb *Bitmap) Remove(x uint32) { if i >= 0 { c := rb.highlowcontainer.getWritableContainerAtIndex(i).iremoveReturnMinimized(lowbits(x)) rb.highlowcontainer.setContainerAtIndex(i, c) - if rb.highlowcontainer.getContainerAtIndex(i).getCardinality() == 0 { + if rb.highlowcontainer.getContainerAtIndex(i).isEmpty() { rb.highlowcontainer.removeAtIndex(i) } } } -// CheckedRemove removes the integer x from the bitmap and return true if the integer was effectively remove (and false if the integer was not present) +// CheckedRemove removes the integer x from the bitmap and return true if the integer was effectively removed (and false if the integer was not present) func (rb *Bitmap) CheckedRemove(x uint32) bool { // TODO: add unit tests for this method hb := highbits(x) @@ -675,7 +645,7 @@ func (rb *Bitmap) CheckedRemove(x uint32) bool { oldcard := C.getCardinality() C = C.iremoveReturnMinimized(lowbits(x)) rb.highlowcontainer.setContainerAtIndex(i, C) - if rb.highlowcontainer.getContainerAtIndex(i).getCardinality() == 0 { + if rb.highlowcontainer.getContainerAtIndex(i).isEmpty() { rb.highlowcontainer.removeAtIndex(i) return true } @@ -730,8 +700,9 @@ func (rb *Bitmap) Select(x uint32) (uint32, error) { remaining := x for i := 0; i < rb.highlowcontainer.size(); i++ { c := rb.highlowcontainer.getContainerAtIndex(i) - if remaining >= uint32(c.getCardinality()) { - remaining -= uint32(c.getCardinality()) + card := uint32(c.getCardinality()) + if remaining >= card { + remaining -= card } else { key := rb.highlowcontainer.getKeyAtIndex(i) return uint32(key)<<16 + uint32(c.selectInt(uint16(remaining))), nil @@ -758,7 +729,7 @@ main: c1 := rb.highlowcontainer.getWritableContainerAtIndex(pos1) c2 := x2.highlowcontainer.getContainerAtIndex(pos2) diff := c1.iand(c2) - if diff.getCardinality() > 0 { + if !diff.isEmpty() { rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, diff, false) intersectionsize++ } @@ -960,7 +931,7 @@ func (rb *Bitmap) Xor(x2 *Bitmap) { } else { // TODO: couple be computed in-place for reduced memory usage c := rb.highlowcontainer.getContainerAtIndex(pos1).xor(x2.highlowcontainer.getContainerAtIndex(pos2)) - if c.getCardinality() > 0 { + if !c.isEmpty() { 
rb.highlowcontainer.setContainerAtIndex(pos1, c) pos1++ } else { @@ -1040,7 +1011,7 @@ main: c1 := rb.highlowcontainer.getWritableContainerAtIndex(pos1) c2 := x2.highlowcontainer.getContainerAtIndex(pos2) diff := c1.iandNot(c2) - if diff.getCardinality() > 0 { + if !diff.isEmpty() { rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, diff, false) intersectionsize++ } @@ -1149,7 +1120,7 @@ main: C := x1.highlowcontainer.getContainerAtIndex(pos1) C = C.and(x2.highlowcontainer.getContainerAtIndex(pos2)) - if C.getCardinality() > 0 { + if !C.isEmpty() { answer.highlowcontainer.appendContainer(s1, C, false) } pos1++ @@ -1196,7 +1167,7 @@ func Xor(x1, x2 *Bitmap) *Bitmap { pos2++ } else { c := x1.highlowcontainer.getContainerAtIndex(pos1).xor(x2.highlowcontainer.getContainerAtIndex(pos2)) - if c.getCardinality() > 0 { + if !c.isEmpty() { answer.highlowcontainer.appendContainer(s1, c, false) } pos1++ @@ -1239,7 +1210,7 @@ main: c1 := x1.highlowcontainer.getContainerAtIndex(pos1) c2 := x2.highlowcontainer.getContainerAtIndex(pos2) diff := c1.andNot(c2) - if diff.getCardinality() > 0 { + if !diff.isEmpty() { answer.highlowcontainer.appendContainer(s1, diff, false) } pos1++ @@ -1329,7 +1300,7 @@ func (rb *Bitmap) Flip(rangeStart, rangeEnd uint64) { if i >= 0 { c := rb.highlowcontainer.getWritableContainerAtIndex(i).inot(int(containerStart), int(containerLast)+1) - if c.getCardinality() > 0 { + if !c.isEmpty() { rb.highlowcontainer.setContainerAtIndex(i, c) } else { rb.highlowcontainer.removeAtIndex(i) @@ -1410,7 +1381,7 @@ func (rb *Bitmap) RemoveRange(rangeStart, rangeEnd uint64) { return } c := rb.highlowcontainer.getWritableContainerAtIndex(i).iremoveRange(int(lbStart), int(lbLast+1)) - if c.getCardinality() > 0 { + if !c.isEmpty() { rb.highlowcontainer.setContainerAtIndex(i, c) } else { rb.highlowcontainer.removeAtIndex(i) @@ -1423,7 +1394,7 @@ func (rb *Bitmap) RemoveRange(rangeStart, rangeEnd uint64) { if ifirst >= 0 { if lbStart != 0 { c := rb.highlowcontainer.getWritableContainerAtIndex(ifirst).iremoveRange(int(lbStart), int(max+1)) - if c.getCardinality() > 0 { + if !c.isEmpty() { rb.highlowcontainer.setContainerAtIndex(ifirst, c) ifirst++ } @@ -1434,7 +1405,7 @@ func (rb *Bitmap) RemoveRange(rangeStart, rangeEnd uint64) { if ilast >= 0 { if lbLast != max { c := rb.highlowcontainer.getWritableContainerAtIndex(ilast).iremoveRange(int(0), int(lbLast+1)) - if c.getCardinality() > 0 { + if !c.isEmpty() { rb.highlowcontainer.setContainerAtIndex(ilast, c) } else { ilast++ @@ -1490,7 +1461,7 @@ func Flip(bm *Bitmap, rangeStart, rangeEnd uint64) *Bitmap { if i >= 0 { c := bm.highlowcontainer.getContainerAtIndex(i).not(int(containerStart), int(containerLast)+1) - if c.getCardinality() > 0 { + if !c.isEmpty() { answer.highlowcontainer.insertNewKeyValueAt(-j-1, uint16(hb), c) } @@ -1581,3 +1552,27 @@ func (rb *Bitmap) Stats() Statistics { } return stats } + +func (rb *Bitmap) checkValidity() bool { + for _, c := range rb.highlowcontainer.containers { + + switch c.(type) { + case *arrayContainer: + if c.getCardinality() > arrayDefaultMaxSize { + fmt.Println("Array containers are limited to size ", arrayDefaultMaxSize) + return false + } + case *bitmapContainer: + if c.getCardinality() <= arrayDefaultMaxSize { + fmt.Println("Bitmaps would be more concise as an array!") + return false + } + case *runContainer16: + if c.getSizeInBytes() > minOfInt(bitmapContainerSizeInBytes(), arrayContainerSizeInBytes(c.getCardinality())) { + fmt.Println("Inefficient run container!") + return false + 
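			// Sizing rules behind these checks, assuming roaring's
			// conventional constants: an array container costs 2 bytes per
			// value and is capped at arrayDefaultMaxSize (4096) entries; a
			// bitmap container is a fixed 65536-bit (8 KiB) block, so below
			// 4097 values an array is never larger; and a run container, at
			// roughly 4 bytes per run plus a small header, is only
			// well-formed while it undercuts both alternatives, which is
			// exactly what the three cases assert.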
} + } + } + return true +} \ No newline at end of file diff --git a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go index d0c832b..f7b7d73 100644 --- a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go +++ b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go @@ -5,13 +5,9 @@ import ( "encoding/binary" "fmt" "io" - - snappy "github.com/glycerine/go-unsnap-stream" - "github.com/tinylib/msgp/msgp" + "github.com/RoaringBitmap/roaring/internal" ) -//go:generate msgp -unexported - type container interface { addOffset(uint16) []container @@ -21,6 +17,7 @@ type container interface { iand(container) container // i stands for inplace andNot(container) container iandNot(container) container // i stands for inplace + isEmpty() bool getCardinality() int // rank returns the number of integers that are // smaller or equal to x. rank(infinity) would be getCardinality(). @@ -51,7 +48,7 @@ type container interface { // any of the implementations. equals(r container) bool - fillLeastSignificant16bits(array []uint32, i int, mask uint32) + fillLeastSignificant16bits(array []uint32, i int, mask uint32) int or(r container) container orCardinality(r container) int isFull() bool @@ -103,18 +100,6 @@ type roaringArray struct { containers []container `msg:"-"` // don't try to serialize directly. needCopyOnWrite []bool copyOnWrite bool - - // conserz is used at serialization time - // to serialize containers. Otherwise empty. - conserz []containerSerz -} - -// containerSerz facilitates serializing container (tricky to -// serialize because it is an interface) by providing a -// light wrapper with a type identifier. -type containerSerz struct { - t contype `msg:"t"` // type - r msgp.Raw `msg:"r"` // Raw msgpack of the actual container type } func newRoaringArray() *roaringArray { @@ -246,7 +231,6 @@ func (ra *roaringArray) resize(newsize int) { func (ra *roaringArray) clear() { ra.resize(0) ra.copyOnWrite = false - ra.conserz = nil } func (ra *roaringArray) clone() *roaringArray { @@ -566,51 +550,58 @@ func (ra *roaringArray) toBytes() ([]byte, error) { return buf.Bytes(), err } -func (ra *roaringArray) readFrom(stream byteInput) (int64, error) { - cookie, err := stream.readUInt32() - - if err != nil { - return stream.getReadBytes(), fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: %s", err) +func (ra *roaringArray) readFrom(stream internal.ByteInput, cookieHeader ...byte) (int64, error) { + var cookie uint32 + var err error + if len(cookieHeader) > 0 && len(cookieHeader) != 4 { + return int64(len(cookieHeader)), fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: incorrect size of cookie header") + } + if len(cookieHeader) == 4 { + cookie = binary.LittleEndian.Uint32(cookieHeader) + } else { + cookie, err = stream.ReadUInt32() + if err != nil { + return stream.GetReadBytes(), fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: %s", err) + } } var size uint32 var isRunBitmap []byte if cookie&0x0000FFFF == serialCookie { - size = uint32(uint16(cookie>>16) + 1) + size = uint32(cookie>>16 + 1) // create is-run-container bitmap isRunBitmapSize := (int(size) + 7) / 8 - isRunBitmap, err = stream.next(isRunBitmapSize) + isRunBitmap, err = stream.Next(isRunBitmapSize) if err != nil { - return stream.getReadBytes(), fmt.Errorf("malformed bitmap, failed to read is-run bitmap, got: %s", err) + return stream.GetReadBytes(), fmt.Errorf("malformed bitmap, failed to read is-run bitmap, 
got: %s", err) } } else if cookie == serialCookieNoRunContainer { - size, err = stream.readUInt32() - + size, err = stream.ReadUInt32() if err != nil { - return stream.getReadBytes(), fmt.Errorf("malformed bitmap, failed to read a bitmap size: %s", err) + return stream.GetReadBytes(), fmt.Errorf("malformed bitmap, failed to read a bitmap size: %s", err) } } else { - return stream.getReadBytes(), fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header") + return stream.GetReadBytes(), fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header") } if size > (1 << 16) { - return stream.getReadBytes(), fmt.Errorf("it is logically impossible to have more than (1<<16) containers") + return stream.GetReadBytes(), fmt.Errorf("it is logically impossible to have more than (1<<16) containers") } // descriptive header - buf, err := stream.next(2 * 2 * int(size)) + buf, err := stream.Next(2 * 2 * int(size)) if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read descriptive header: %s", err) + return stream.GetReadBytes(), fmt.Errorf("failed to read descriptive header: %s", err) } keycard := byteSliceAsUint16Slice(buf) if isRunBitmap == nil || size >= noOffsetThreshold { - if err := stream.skipBytes(int(size) * 4); err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to skip bytes: %s", err) + if err := stream.SkipBytes(int(size) * 4); err != nil { + return stream.GetReadBytes(), fmt.Errorf("failed to skip bytes: %s", err) } } @@ -641,30 +632,29 @@ func (ra *roaringArray) readFrom(stream byteInput) (int64, error) { if isRunBitmap != nil && isRunBitmap[i/8]&(1<<(i%8)) != 0 { // run container - nr, err := stream.readUInt16() + nr, err := stream.ReadUInt16() if err != nil { return 0, fmt.Errorf("failed to read runtime container size: %s", err) } - buf, err := stream.next(int(nr) * 4) + buf, err := stream.Next(int(nr) * 4) if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read runtime container content: %s", err) + return stream.GetReadBytes(), fmt.Errorf("failed to read runtime container content: %s", err) } nb := runContainer16{ iv: byteSliceAsInterval16Slice(buf), - card: int64(card), } ra.containers[i] = &nb } else if card > arrayDefaultMaxSize { // bitmap container - buf, err := stream.next(arrayDefaultMaxSize * 2) + buf, err := stream.Next(arrayDefaultMaxSize * 2) if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read bitmap container: %s", err) + return stream.GetReadBytes(), fmt.Errorf("failed to read bitmap container: %s", err) } nb := bitmapContainer{ @@ -675,10 +665,10 @@ func (ra *roaringArray) readFrom(stream byteInput) (int64, error) { ra.containers[i] = &nb } else { // array container - buf, err := stream.next(card * 2) + buf, err := stream.Next(card * 2) if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read array container: %s", err) + return stream.GetReadBytes(), fmt.Errorf("failed to read array container: %s", err) } nb := arrayContainer{ @@ -689,7 +679,7 @@ func (ra *roaringArray) readFrom(stream byteInput) (int64, error) { } } - return stream.getReadBytes(), nil + return stream.GetReadBytes(), nil } func (ra *roaringArray) hasRunCompression() bool { @@ -702,84 +692,6 @@ func (ra *roaringArray) hasRunCompression() bool { return false } -func (ra *roaringArray) writeToMsgpack(stream io.Writer) error { - - ra.conserz = make([]containerSerz, len(ra.containers)) - for i, v := range ra.containers { - switch cn := v.(type) { - case 
*bitmapContainer: - bts, err := cn.MarshalMsg(nil) - if err != nil { - return err - } - ra.conserz[i].t = bitmapContype - ra.conserz[i].r = bts - case *arrayContainer: - bts, err := cn.MarshalMsg(nil) - if err != nil { - return err - } - ra.conserz[i].t = arrayContype - ra.conserz[i].r = bts - case *runContainer16: - bts, err := cn.MarshalMsg(nil) - if err != nil { - return err - } - ra.conserz[i].t = run16Contype - ra.conserz[i].r = bts - default: - panic(fmt.Errorf("Unrecognized container implementation: %T", cn)) - } - } - w := snappy.NewWriter(stream) - err := msgp.Encode(w, ra) - ra.conserz = nil - return err -} - -func (ra *roaringArray) readFromMsgpack(stream io.Reader) error { - r := snappy.NewReader(stream) - err := msgp.Decode(r, ra) - if err != nil { - return err - } - - if len(ra.containers) != len(ra.keys) { - ra.containers = make([]container, len(ra.keys)) - } - - for i, v := range ra.conserz { - switch v.t { - case bitmapContype: - c := &bitmapContainer{} - _, err = c.UnmarshalMsg(v.r) - if err != nil { - return err - } - ra.containers[i] = c - case arrayContype: - c := &arrayContainer{} - _, err = c.UnmarshalMsg(v.r) - if err != nil { - return err - } - ra.containers[i] = c - case run16Contype: - c := &runContainer16{} - _, err = c.UnmarshalMsg(v.r) - if err != nil { - return err - } - ra.containers[i] = c - default: - return fmt.Errorf("unrecognized contype serialization code: '%v'", v.t) - } - } - ra.conserz = nil - return nil -} - func (ra *roaringArray) advanceUntil(min uint16, pos int) int { lower := pos + 1 diff --git a/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go deleted file mode 100644 index dcd7187..0000000 --- a/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go +++ /dev/null @@ -1,529 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import ( - "github.com/tinylib/msgp/msgp" -) - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *containerSerz) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zxvk uint32 - zxvk, err = dc.ReadMapHeader() - if err != nil { - return - } - for zxvk > 0 { - zxvk-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zbzg uint8 - zbzg, err = dc.ReadUint8() - z.t = contype(zbzg) - } - if err != nil { - return - } - case "r": - err = z.r.DecodeMsg(dc) - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *containerSerz) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "t" - err = en.Append(0x82, 0xa1, 0x74) - if err != nil { - return err - } - err = en.WriteUint8(uint8(z.t)) - if err != nil { - return - } - // write "r" - err = en.Append(0xa1, 0x72) - if err != nil { - return err - } - err = z.r.EncodeMsg(en) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *containerSerz) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "t" - o = append(o, 0x82, 0xa1, 0x74) - o = msgp.AppendUint8(o, uint8(z.t)) - // string "r" - o = append(o, 0xa1, 0x72) - o, err = z.r.MarshalMsg(o) - if err != nil { - return - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z 
*containerSerz) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zbai uint32 - zbai, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zbai > 0 { - zbai-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zcmr uint8 - zcmr, bts, err = msgp.ReadUint8Bytes(bts) - z.t = contype(zcmr) - } - if err != nil { - return - } - case "r": - bts, err = z.r.UnmarshalMsg(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *containerSerz) Msgsize() (s int) { - s = 1 + 2 + msgp.Uint8Size + 2 + z.r.Msgsize() - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *contype) DecodeMsg(dc *msgp.Reader) (err error) { - { - var zajw uint8 - zajw, err = dc.ReadUint8() - (*z) = contype(zajw) - } - if err != nil { - return - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z contype) EncodeMsg(en *msgp.Writer) (err error) { - err = en.WriteUint8(uint8(z)) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z contype) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - o = msgp.AppendUint8(o, uint8(z)) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *contype) UnmarshalMsg(bts []byte) (o []byte, err error) { - { - var zwht uint8 - zwht, bts, err = msgp.ReadUint8Bytes(bts) - (*z) = contype(zwht) - } - if err != nil { - return - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z contype) Msgsize() (s int) { - s = msgp.Uint8Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *roaringArray) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zlqf uint32 - zlqf, err = dc.ReadMapHeader() - if err != nil { - return - } - for zlqf > 0 { - zlqf-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "keys": - var zdaf uint32 - zdaf, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.keys) >= int(zdaf) { - z.keys = (z.keys)[:zdaf] - } else { - z.keys = make([]uint16, zdaf) - } - for zhct := range z.keys { - z.keys[zhct], err = dc.ReadUint16() - if err != nil { - return - } - } - case "needCopyOnWrite": - var zpks uint32 - zpks, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.needCopyOnWrite) >= int(zpks) { - z.needCopyOnWrite = (z.needCopyOnWrite)[:zpks] - } else { - z.needCopyOnWrite = make([]bool, zpks) - } - for zcua := range z.needCopyOnWrite { - z.needCopyOnWrite[zcua], err = dc.ReadBool() - if err != nil { - return - } - } - case "copyOnWrite": - z.copyOnWrite, err = dc.ReadBool() - if err != nil { - return - } - case "conserz": - var zjfb uint32 - zjfb, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.conserz) >= int(zjfb) { - z.conserz = (z.conserz)[:zjfb] - } else { - z.conserz = make([]containerSerz, zjfb) - } - for zxhx := range z.conserz { - var zcxo uint32 - zcxo, err = dc.ReadMapHeader() - if err != nil { - return - } - for zcxo > 0 { - zcxo-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - 
case "t": - { - var zeff uint8 - zeff, err = dc.ReadUint8() - z.conserz[zxhx].t = contype(zeff) - } - if err != nil { - return - } - case "r": - err = z.conserz[zxhx].r.DecodeMsg(dc) - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *roaringArray) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 4 - // write "keys" - err = en.Append(0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.keys))) - if err != nil { - return - } - for zhct := range z.keys { - err = en.WriteUint16(z.keys[zhct]) - if err != nil { - return - } - } - // write "needCopyOnWrite" - err = en.Append(0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.needCopyOnWrite))) - if err != nil { - return - } - for zcua := range z.needCopyOnWrite { - err = en.WriteBool(z.needCopyOnWrite[zcua]) - if err != nil { - return - } - } - // write "copyOnWrite" - err = en.Append(0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - if err != nil { - return err - } - err = en.WriteBool(z.copyOnWrite) - if err != nil { - return - } - // write "conserz" - err = en.Append(0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.conserz))) - if err != nil { - return - } - for zxhx := range z.conserz { - // map header, size 2 - // write "t" - err = en.Append(0x82, 0xa1, 0x74) - if err != nil { - return err - } - err = en.WriteUint8(uint8(z.conserz[zxhx].t)) - if err != nil { - return - } - // write "r" - err = en.Append(0xa1, 0x72) - if err != nil { - return err - } - err = z.conserz[zxhx].r.EncodeMsg(en) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *roaringArray) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 4 - // string "keys" - o = append(o, 0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.keys))) - for zhct := range z.keys { - o = msgp.AppendUint16(o, z.keys[zhct]) - } - // string "needCopyOnWrite" - o = append(o, 0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - o = msgp.AppendArrayHeader(o, uint32(len(z.needCopyOnWrite))) - for zcua := range z.needCopyOnWrite { - o = msgp.AppendBool(o, z.needCopyOnWrite[zcua]) - } - // string "copyOnWrite" - o = append(o, 0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - o = msgp.AppendBool(o, z.copyOnWrite) - // string "conserz" - o = append(o, 0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a) - o = msgp.AppendArrayHeader(o, uint32(len(z.conserz))) - for zxhx := range z.conserz { - // map header, size 2 - // string "t" - o = append(o, 0x82, 0xa1, 0x74) - o = msgp.AppendUint8(o, uint8(z.conserz[zxhx].t)) - // string "r" - o = append(o, 0xa1, 0x72) - o, err = z.conserz[zxhx].r.MarshalMsg(o) - if err != nil { - return - } - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *roaringArray) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zrsw uint32 - zrsw, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zrsw > 0 { - zrsw-- - 
field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "keys": - var zxpk uint32 - zxpk, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.keys) >= int(zxpk) { - z.keys = (z.keys)[:zxpk] - } else { - z.keys = make([]uint16, zxpk) - } - for zhct := range z.keys { - z.keys[zhct], bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - } - case "needCopyOnWrite": - var zdnj uint32 - zdnj, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.needCopyOnWrite) >= int(zdnj) { - z.needCopyOnWrite = (z.needCopyOnWrite)[:zdnj] - } else { - z.needCopyOnWrite = make([]bool, zdnj) - } - for zcua := range z.needCopyOnWrite { - z.needCopyOnWrite[zcua], bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - return - } - } - case "copyOnWrite": - z.copyOnWrite, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - return - } - case "conserz": - var zobc uint32 - zobc, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.conserz) >= int(zobc) { - z.conserz = (z.conserz)[:zobc] - } else { - z.conserz = make([]containerSerz, zobc) - } - for zxhx := range z.conserz { - var zsnv uint32 - zsnv, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zsnv > 0 { - zsnv-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zkgt uint8 - zkgt, bts, err = msgp.ReadUint8Bytes(bts) - z.conserz[zxhx].t = contype(zkgt) - } - if err != nil { - return - } - case "r": - bts, err = z.conserz[zxhx].r.UnmarshalMsg(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *roaringArray) Msgsize() (s int) { - s = 1 + 5 + msgp.ArrayHeaderSize + (len(z.keys) * (msgp.Uint16Size)) + 16 + msgp.ArrayHeaderSize + (len(z.needCopyOnWrite) * (msgp.BoolSize)) + 12 + msgp.BoolSize + 8 + msgp.ArrayHeaderSize - for zxhx := range z.conserz { - s += 1 + 2 + msgp.Uint8Size + 2 + z.conserz[zxhx].r.Msgsize() - } - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go b/vendor/github.com/RoaringBitmap/roaring/runcontainer.go index fc1f456..a722760 100644 --- a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/runcontainer.go @@ -44,16 +44,10 @@ import ( "unsafe" ) -//go:generate msgp -unexported - // runContainer16 does run-length encoding of sets of // uint16 integers. type runContainer16 struct { - iv []interval16 - card int64 - - // avoid allocation during search - myOpts searchOptions `msg:"-"` + iv []interval16 } // interval16 is the internal to runContainer16 @@ -76,8 +70,8 @@ func newInterval16Range(start, last uint16) interval16 { } // runlen returns the count of integers in the interval. -func (iv interval16) runlen() int64 { - return int64(iv.length) + 1 +func (iv interval16) runlen() int { + return int(iv.length) + 1 } func (iv interval16) last() uint16 { @@ -120,8 +114,6 @@ func (p uint16Slice) Less(i, j int) bool { return p[i] < p[j] } // Swap swaps elements i and j. func (p uint16Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -//msgp:ignore addHelper - // addHelper helps build a runContainer16. 
type addHelper16 struct { runstart uint16 @@ -201,7 +193,6 @@ func newRunContainer16FromVals(alreadySorted bool, vals ...uint16) *runContainer ah.storeIval(ah.runstart, ah.runlen) } rc.iv = ah.m - rc.card = int64(ah.actuallyAdded) return rc } @@ -291,7 +282,6 @@ func newRunContainer16FromArray(arr *arrayContainer) *runContainer16 { ah.storeIval(ah.runstart, ah.runlen) } rc.iv = ah.m - rc.card = int64(ah.actuallyAdded) return rc } @@ -308,7 +298,6 @@ func (rc *runContainer16) set(alreadySorted bool, vals ...uint16) { rc2 := newRunContainer16FromVals(alreadySorted, vals...) un := rc.union(rc2) rc.iv = un.iv - rc.card = 0 } // canMerge returns true iff the intervals @@ -316,10 +305,10 @@ func (rc *runContainer16) set(alreadySorted bool, vals ...uint16) { // contiguous and so can be merged into // a single interval. func canMerge16(a, b interval16) bool { - if int64(a.last())+1 < int64(b.start) { + if int(a.last())+1 < int(b.start) { return false } - return int64(b.last())+1 >= int64(a.start) + return int(b.last())+1 >= int(a.start) } // haveOverlap differs from canMerge in that @@ -328,10 +317,10 @@ func canMerge16(a, b interval16) bool { // it would be the empty set, and we return // false). func haveOverlap16(a, b interval16) bool { - if int64(a.last())+1 <= int64(b.start) { + if int(a.last())+1 <= int(b.start) { return false } - return int64(b.last())+1 > int64(a.start) + return int(b.last())+1 > int(a.start) } // mergeInterval16s joins a and b into a @@ -392,11 +381,11 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { var m []interval16 - alim := int64(len(rc.iv)) - blim := int64(len(b.iv)) + alim := int(len(rc.iv)) + blim := int(len(b.iv)) - var na int64 // next from a - var nb int64 // next from b + var na int // next from a + var nb int // next from b // merged holds the current merge output, which might // get additional merges before being appended to m. 
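canMerge16 and haveOverlap16 above differ only in whether adjacency counts, and that distinction drives the whole union walk. A self-contained sketch of the two predicates (interval stands in for the unexported interval16):

package main

import "fmt"

type interval struct{ start, last int }

// canMerge: the intervals overlap or are adjacent, so one run can hold both.
func canMerge(a, b interval) bool {
	return a.last+1 >= b.start && b.last+1 >= a.start
}

// haveOverlap: the intersection is non-empty; mere adjacency is not enough.
func haveOverlap(a, b interval) bool {
	return a.last+1 > b.start && b.last+1 > a.start
}

func main() {
	a, b := interval{1, 3}, interval{4, 6}
	fmt.Println(canMerge(a, b))    // true: [1,3] and [4,6] fuse into [1,6]
	fmt.Println(haveOverlap(a, b)) // false: they merely touch
}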
@@ -416,12 +405,12 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { mergedUpdated := false if canMerge16(cura, merged) { merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) mergedUpdated = true } if canMerge16(curb, merged) { merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) mergedUpdated = true } if !mergedUpdated { @@ -444,8 +433,8 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { } else { merged = mergeInterval16s(cura, curb) mergedUsed = true - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) } } } @@ -464,7 +453,7 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { cura = rc.iv[na] if canMerge16(cura, merged) { merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) } else { break aAdds } @@ -478,7 +467,7 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { curb = b.iv[nb] if canMerge16(curb, merged) { merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) } else { break bAdds } @@ -500,17 +489,17 @@ func (rc *runContainer16) union(b *runContainer16) *runContainer16 { } // unionCardinality returns the cardinality of the merger of two runContainer16s, the union of rc and b. -func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { +func (rc *runContainer16) unionCardinality(b *runContainer16) uint { // rc is also known as 'a' here, but golint insisted we // call it rc for consistency with the rest of the methods. - answer := uint64(0) + answer := uint(0) - alim := int64(len(rc.iv)) - blim := int64(len(b.iv)) + alim := int(len(rc.iv)) + blim := int(len(b.iv)) - var na int64 // next from a - var nb int64 // next from b + var na int // next from a + var nb int // next from b // merged holds the current merge output, which might // get additional merges before being appended to m. 
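These hunks continue the int64-to-int migration; it is safe because every operand is derived from 16-bit values, and a wider type is needed at all only because a full run holds 65536 values, one more than uint16 can represent. A tiny illustration of the wraparound being avoided:

package main

import "fmt"

// runlen mirrors interval16.runlen: the count is length+1, computed in int
// because a run spanning the whole 16-bit space holds 65536 values.
func runlen(length uint16) int {
	return int(length) + 1
}

func main() {
	fmt.Println(runlen(65535)) // 65536; as uint16 this would wrap to 0
	var last, one uint16 = 65535, 1
	fmt.Println(last + one) // 0: the overflow the wider type avoids
}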
@@ -530,18 +519,18 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { mergedUpdated := false if canMerge16(cura, merged) { merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) mergedUpdated = true } if canMerge16(curb, merged) { merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) mergedUpdated = true } if !mergedUpdated { // we know that merged is disjoint from cura and curb //m = append(m, merged) - answer += uint64(merged.last()) - uint64(merged.start) + 1 + answer += uint(merged.last()) - uint(merged.start) + 1 mergedUsed = false } continue @@ -550,19 +539,19 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { // !mergedUsed if !canMerge16(cura, curb) { if cura.start < curb.start { - answer += uint64(cura.last()) - uint64(cura.start) + 1 + answer += uint(cura.last()) - uint(cura.start) + 1 //m = append(m, cura) na++ } else { - answer += uint64(curb.last()) - uint64(curb.start) + 1 + answer += uint(curb.last()) - uint(curb.start) + 1 //m = append(m, curb) nb++ } } else { merged = mergeInterval16s(cura, curb) mergedUsed = true - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) } } } @@ -581,7 +570,7 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { cura = rc.iv[na] if canMerge16(cura, merged) { merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) + na = rc.indexOfIntervalAtOrAfter(int(merged.last())+1, na+1) } else { break aAdds } @@ -595,7 +584,7 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { curb = b.iv[nb] if canMerge16(curb, merged) { merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) + nb = b.indexOfIntervalAtOrAfter(int(merged.last())+1, nb+1) } else { break bAdds } @@ -604,23 +593,20 @@ func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { } //m = append(m, merged) - answer += uint64(merged.last()) - uint64(merged.start) + 1 + answer += uint(merged.last()) - uint(merged.start) + 1 } for _, r := range rc.iv[na:] { - answer += uint64(r.last()) - uint64(r.start) + 1 + answer += uint(r.last()) - uint(r.start) + 1 } for _, r := range b.iv[nb:] { - answer += uint64(r.last()) - uint64(r.start) + 1 + answer += uint(r.last()) - uint(r.start) + 1 } return answer } // indexOfIntervalAtOrAfter is a helper for union. 
-func (rc *runContainer16) indexOfIntervalAtOrAfter(key int64, startIndex int64) int64 { - rc.myOpts.startIndex = startIndex - rc.myOpts.endxIndex = 0 - - w, already, _ := rc.search(key, &rc.myOpts) +func (rc *runContainer16) indexOfIntervalAtOrAfter(key int, startIndex int) int { + w, already, _ := rc.searchRange(key, startIndex, 0) if already { return w } @@ -632,8 +618,8 @@ func (rc *runContainer16) indexOfIntervalAtOrAfter(key int64, startIndex int64) func (rc *runContainer16) intersect(b *runContainer16) *runContainer16 { a := rc - numa := int64(len(a.iv)) - numb := int64(len(b.iv)) + numa := int(len(a.iv)) + numb := int(len(b.iv)) res := &runContainer16{} if numa == 0 || numb == 0 { return res @@ -647,21 +633,21 @@ func (rc *runContainer16) intersect(b *runContainer16) *runContainer16 { var output []interval16 - var acuri int64 - var bcuri int64 + var acuri int + var bcuri int - astart := int64(a.iv[acuri].start) - bstart := int64(b.iv[bcuri].start) + astart := int(a.iv[acuri].start) + bstart := int(b.iv[bcuri].start) var intersection interval16 - var leftoverstart int64 + var leftoverstart int var isOverlap, isLeftoverA, isLeftoverB bool var done bool toploop: for acuri < numa && bcuri < numb { isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection = - intersectWithLeftover16(astart, int64(a.iv[acuri].last()), bstart, int64(b.iv[bcuri].last())) + intersectWithLeftover16(astart, int(a.iv[acuri].last()), bstart, int(b.iv[bcuri].last())) if !isOverlap { switch { @@ -670,17 +656,14 @@ toploop: if done { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) case astart > bstart: bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart) if done { break toploop } - bstart = int64(b.iv[bcuri].start) - - //default: - // panic("impossible that astart == bstart, since !isOverlap") + bstart = int(b.iv[bcuri].start) } } else { @@ -695,7 +678,7 @@ toploop: if bcuri >= numb { break toploop } - bstart = int64(b.iv[bcuri].start) + bstart = int(b.iv[bcuri].start) case isLeftoverB: // note that we change bstart without advancing bcuri, // since we need to capture any 2ndary intersections with b.iv[bcuri] @@ -704,27 +687,23 @@ toploop: if acuri >= numa { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) default: // neither had leftover, both completely consumed - // optionally, assert for sanity: - //if a.iv[acuri].endx != b.iv[bcuri].endx { - // panic("huh? should only be possible that endx agree now!") - //} // advance to next a interval acuri++ if acuri >= numa { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) // advance to next b interval bcuri++ if bcuri >= numb { break toploop } - bstart = int64(b.iv[bcuri].start) + bstart = int(b.iv[bcuri].start) } } } // end for toploop @@ -739,12 +718,12 @@ toploop: // intersectCardinality returns the cardinality of the // intersection of rc (also known as 'a') and b. 
-func (rc *runContainer16) intersectCardinality(b *runContainer16) int64 { - answer := int64(0) +func (rc *runContainer16) intersectCardinality(b *runContainer16) int { + answer := int(0) a := rc - numa := int64(len(a.iv)) - numb := int64(len(b.iv)) + numa := int(len(a.iv)) + numb := int(len(b.iv)) if numa == 0 || numb == 0 { return 0 } @@ -755,14 +734,14 @@ func (rc *runContainer16) intersectCardinality(b *runContainer16) int64 { } } - var acuri int64 - var bcuri int64 + var acuri int + var bcuri int - astart := int64(a.iv[acuri].start) - bstart := int64(b.iv[bcuri].start) + astart := int(a.iv[acuri].start) + bstart := int(b.iv[bcuri].start) var intersection interval16 - var leftoverstart int64 + var leftoverstart int var isOverlap, isLeftoverA, isLeftoverB bool var done bool pass := 0 @@ -771,7 +750,7 @@ toploop: pass++ isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection = - intersectWithLeftover16(astart, int64(a.iv[acuri].last()), bstart, int64(b.iv[bcuri].last())) + intersectWithLeftover16(astart, int(a.iv[acuri].last()), bstart, int(b.iv[bcuri].last())) if !isOverlap { switch { @@ -780,22 +759,19 @@ toploop: if done { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) case astart > bstart: bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart) if done { break toploop } - bstart = int64(b.iv[bcuri].start) - - //default: - // panic("impossible that astart == bstart, since !isOverlap") + bstart = int(b.iv[bcuri].start) } } else { // isOverlap - answer += int64(intersection.last()) - int64(intersection.start) + 1 + answer += int(intersection.last()) - int(intersection.start) + 1 switch { case isLeftoverA: // note that we change astart without advancing acuri, @@ -805,7 +781,7 @@ toploop: if bcuri >= numb { break toploop } - bstart = int64(b.iv[bcuri].start) + bstart = int(b.iv[bcuri].start) case isLeftoverB: // note that we change bstart without advancing bcuri, // since we need to capture any 2ndary intersections with b.iv[bcuri] @@ -814,27 +790,23 @@ toploop: if acuri >= numa { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) default: // neither had leftover, both completely consumed - // optionally, assert for sanity: - //if a.iv[acuri].endx != b.iv[bcuri].endx { - // panic("huh? should only be possible that endx agree now!") - //} // advance to next a interval acuri++ if acuri >= numa { break toploop } - astart = int64(a.iv[acuri].start) + astart = int(a.iv[acuri].start) // advance to next b interval bcuri++ if bcuri >= numb { break toploop } - bstart = int64(b.iv[bcuri].start) + bstart = int(b.iv[bcuri].start) } } } // end for toploop @@ -844,7 +816,7 @@ toploop: // get returns true iff key is in the container. func (rc *runContainer16) contains(key uint16) bool { - _, in, _ := rc.search(int64(key), nil) + _, in, _ := rc.search(int(key)) return in } @@ -853,22 +825,7 @@ func (rc *runContainer16) numIntervals() int { return len(rc.iv) } -// searchOptions allows us to accelerate search with -// prior knowledge of (mostly lower) bounds. This is used by Union -// and Intersect. -type searchOptions struct { - // start here instead of at 0 - startIndex int64 - - // upper bound instead of len(rc.iv); - // endxIndex == 0 means ignore the bound and use - // endxIndex == n ==len(rc.iv) which is also - // naturally the default for search() - // when opt = nil. 
- endxIndex int64 -} - -// search returns alreadyPresent to indicate if the +// searchRange returns alreadyPresent to indicate if the // key is already in one of our interval16s. // // If key is alreadyPresent, then whichInterval16 tells @@ -892,24 +849,16 @@ type searchOptions struct { // // runContainer16.search always returns whichInterval16 < len(rc.iv). // -// If not nil, opts can be used to further restrict -// the search space. +// The search space is from startIndex to endxIndex. If endxIndex is set to zero, then there +// no upper bound. // -func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval16 int64, alreadyPresent bool, numCompares int) { - n := int64(len(rc.iv)) +func (rc *runContainer16) searchRange(key int, startIndex int, endxIndex int) (whichInterval16 int, alreadyPresent bool, numCompares int) { + n := int(len(rc.iv)) if n == 0 { return -1, false, 0 } - - startIndex := int64(0) - endxIndex := n - if opts != nil { - startIndex = opts.startIndex - - // let endxIndex == 0 mean no effect - if opts.endxIndex > 0 { - endxIndex = opts.endxIndex - } + if endxIndex == 0 { + endxIndex = n } // sort.Search returns the smallest index i @@ -927,7 +876,7 @@ func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval1 h := i + (j-i)/2 // avoid overflow when computing h as the bisector // i <= h < j numCompares++ - if !(key < int64(rc.iv[h].start)) { + if !(key < int(rc.iv[h].start)) { i = h + 1 } else { j = h @@ -947,7 +896,7 @@ func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval1 if below == n { // all falses => key is >= start of all interval16s // ... so does it belong to the last interval16? - if key < int64(rc.iv[n-1].last())+1 { + if key < int(rc.iv[n-1].last())+1 { // yes, it belongs to the last interval16 alreadyPresent = true return @@ -968,7 +917,7 @@ func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval1 // key is < rc.iv[below].start // is key in below-1 interval16? - if key >= int64(rc.iv[below-1].start) && key < int64(rc.iv[below-1].last())+1 { + if key >= int(rc.iv[below-1].start) && key < int(rc.iv[below-1].last())+1 { // yes, it is. key is in below-1 interval16. alreadyPresent = true return @@ -979,28 +928,55 @@ func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval1 return } -// cardinality returns the count of the integers stored in the -// runContainer16. -func (rc *runContainer16) cardinality() int64 { - if len(rc.iv) == 0 { - rc.card = 0 - return 0 - } - if rc.card > 0 { - return rc.card // already cached - } +// search returns alreadyPresent to indicate if the +// key is already in one of our interval16s. +// +// If key is alreadyPresent, then whichInterval16 tells +// you where. +// +// If key is not already present, then whichInterval16 is +// set as follows: +// +// a) whichInterval16 == len(rc.iv)-1 if key is beyond our +// last interval16 in rc.iv; +// +// b) whichInterval16 == -1 if key is before our first +// interval16 in rc.iv; +// +// c) whichInterval16 is set to the minimum index of rc.iv +// which comes strictly before the key; +// so rc.iv[whichInterval16].last < key, +// and if whichInterval16+1 exists, then key < rc.iv[whichInterval16+1].start +// (Note that whichInterval16+1 won't exist when +// whichInterval16 is the last interval.) +// +// runContainer16.search always returns whichInterval16 < len(rc.iv). 
+// +func (rc *runContainer16) search(key int) (whichInterval16 int, alreadyPresent bool, numCompares int) { + return rc.searchRange(key, 0, 0) +} + +// getCardinality returns the count of the integers stored in the +// runContainer16. The running complexity depends on the size +// of the container. +func (rc *runContainer16) getCardinality() int { // have to compute it - var n int64 + n := 0 for _, p := range rc.iv { n += p.runlen() } - rc.card = n // cache it return n } +// isEmpty returns true if the container is empty. +// It runs in constant time. +func (rc *runContainer16) isEmpty() bool { + return len(rc.iv) == 0 +} + // AsSlice decompresses the contents into a []uint16 slice. func (rc *runContainer16) AsSlice() []uint16 { - s := make([]uint16, rc.cardinality()) + s := make([]uint16, rc.getCardinality()) j := 0 for _, p := range rc.iv { for i := p.start; i <= p.last(); i++ { @@ -1070,19 +1046,15 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { // but note that some unit tests use this method to build up test // runcontainers without calling runOptimize - k64 := int64(k) + k64 := int(k) - index, present, _ := rc.search(k64, nil) + index, present, _ := rc.search(k64) if present { return // already there } wasNew = true - // increment card if it is cached already - if rc.card > 0 { - rc.card++ - } - n := int64(len(rc.iv)) + n := int(len(rc.iv)) if index == -1 { // we may need to extend the first run if n > 0 { @@ -1099,7 +1071,7 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { // are we off the end? handle both index == n and index == n-1: if index >= n-1 { - if int64(rc.iv[n-1].last())+1 == k64 { + if int(rc.iv[n-1].last())+1 == k64 { rc.iv[n-1].length++ return } @@ -1118,7 +1090,7 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { right := index + 1 // are we fusing left and right by adding k? - if int64(rc.iv[left].last())+1 == k64 && int64(rc.iv[right].start) == k64+1 { + if int(rc.iv[left].last())+1 == k64 && int(rc.iv[right].start) == k64+1 { // fuse into left rc.iv[left].length = rc.iv[right].last() - rc.iv[left].start // remove redundant right @@ -1127,14 +1099,14 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { } // are we an addition to left? - if int64(rc.iv[left].last())+1 == k64 { + if int(rc.iv[left].last())+1 == k64 { // yes rc.iv[left].length++ return } // are we an addition to right? - if int64(rc.iv[right].start) == k64+1 { + if int(rc.iv[right].start) == k64+1 { // yes rc.iv[right].start = k rc.iv[right].length++ @@ -1147,13 +1119,11 @@ func (rc *runContainer16) Add(k uint16) (wasNew bool) { return } -//msgp:ignore runIterator - // runIterator16 advice: you must call hasNext() // before calling next()/peekNext() to insure there are contents. type runIterator16 struct { rc *runContainer16 - curIndex int64 + curIndex int curPosInIndex uint16 } @@ -1178,8 +1148,8 @@ func (rc *runContainer16) iterate(cb func(x uint16) bool) bool { // returns true when there is at least one more value // available in the iteration sequence. func (ri *runIterator16) hasNext() bool { - return int64(len(ri.rc.iv)) > ri.curIndex+1 || - (int64(len(ri.rc.iv)) == ri.curIndex+1 && ri.rc.iv[ri.curIndex].length >= ri.curPosInIndex) + return int(len(ri.rc.iv)) > ri.curIndex+1 || + (int(len(ri.rc.iv)) == ri.curIndex+1 && ri.rc.iv[ri.curIndex].length >= ri.curPosInIndex) } // next returns the next value in the iteration sequence. 
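The searchRange contract documented above is easiest to see against a concrete pair of intervals. The helper below is a compact stand-in, not the upstream code, built on sort.Search in the same way:

package main

import (
	"fmt"
	"sort"
)

type interval struct{ start, last int }

// search reports the interval containing key, or the last interval strictly
// before it (-1 if none), plus whether key was actually present.
func search(iv []interval, key int) (which int, present bool) {
	// below = count of intervals whose start is <= key
	below := sort.Search(len(iv), func(h int) bool { return key < iv[h].start })
	if below == 0 {
		return -1, false // case (b): key precedes every interval
	}
	which = below - 1
	present = key <= iv[which].last // otherwise case (a) or (c)
	return which, present
}

func main() {
	iv := []interval{{10, 20}, {30, 40}}
	fmt.Println(search(iv, 15)) // 0 true: inside [10,20]
	fmt.Println(search(iv, 25)) // 0 false: between [10,20] and [30,40]
	fmt.Println(search(iv, 5))  // -1 false: before everything
	fmt.Println(search(iv, 50)) // 1 false: beyond the last interval
}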
@@ -1207,13 +1177,8 @@ func (ri *runIterator16) advanceIfNeeded(minval uint16) { return } - opt := &searchOptions{ - startIndex: ri.curIndex, - endxIndex: int64(len(ri.rc.iv)), - } - // interval cannot be -1 because of minval > peekNext - interval, isPresent, _ := ri.rc.search(int64(minval), opt) + interval, isPresent, _ := ri.rc.searchRange(int(minval), ri.curIndex, int(len(ri.rc.iv))) // if the minval is present, set the curPosIndex at the right position if isPresent { @@ -1231,13 +1196,13 @@ func (ri *runIterator16) advanceIfNeeded(minval uint16) { // before calling next() to insure there are contents. type runReverseIterator16 struct { rc *runContainer16 - curIndex int64 // index into rc.iv + curIndex int // index into rc.iv curPosInIndex uint16 // offset in rc.iv[curIndex] } // newRunReverseIterator16 returns a new empty run iterator. func (rc *runContainer16) newRunReverseIterator16() *runReverseIterator16 { - index := int64(len(rc.iv)) - 1 + index := int(len(rc.iv)) - 1 pos := uint16(0) if index >= 0 { @@ -1310,7 +1275,7 @@ func (ri *runIterator16) nextMany(hs uint32, buf []uint32) int { ri.curPosInIndex = 0 ri.curIndex++ - if ri.curIndex == int64(len(ri.rc.iv)) { + if ri.curIndex == int(len(ri.rc.iv)) { break } } else { @@ -1351,7 +1316,7 @@ func (ri *runIterator16) nextMany64(hs uint64, buf []uint64) int { ri.curPosInIndex = 0 ri.curIndex++ - if ri.curIndex == int64(len(ri.rc.iv)) { + if ri.curIndex == int(len(ri.rc.iv)) { break } } else { @@ -1365,8 +1330,8 @@ func (ri *runIterator16) nextMany64(hs uint64, buf []uint64) int { // remove removes key from the container. func (rc *runContainer16) removeKey(key uint16) (wasPresent bool) { - var index int64 - index, wasPresent, _ = rc.search(int64(key), nil) + var index int + index, wasPresent, _ = rc.search(int(key)) if !wasPresent { return // already removed, nothing to do. } @@ -1377,15 +1342,14 @@ func (rc *runContainer16) removeKey(key uint16) (wasPresent bool) { // internal helper functions -func (rc *runContainer16) deleteAt(curIndex *int64, curPosInIndex *uint16) { - rc.card-- +func (rc *runContainer16) deleteAt(curIndex *int, curPosInIndex *uint16) { ci := *curIndex pos := *curPosInIndex // are we first, last, or in the middle of our interval16? switch { case pos == 0: - if int64(rc.iv[ci].length) == 0 { + if int(rc.iv[ci].length) == 0 { // our interval disappears rc.iv = append(rc.iv[:ci], rc.iv[ci+1:]...) 
// curIndex stays the same, since the delete did @@ -1406,8 +1370,8 @@ func (rc *runContainer16) deleteAt(curIndex *int64, curPosInIndex *uint16) { // split into two, adding an interval16 new0 := newInterval16Range(rc.iv[ci].start, rc.iv[ci].start+*curPosInIndex-1) - new1start := int64(rc.iv[ci].start+*curPosInIndex) + 1 - if new1start > int64(MaxUint16) { + new1start := int(rc.iv[ci].start+*curPosInIndex) + 1 + if new1start > int(MaxUint16) { panic("overflow?!?!") } new1 := newInterval16Range(uint16(new1start), rc.iv[ci].last()) @@ -1420,14 +1384,14 @@ func (rc *runContainer16) deleteAt(curIndex *int64, curPosInIndex *uint16) { } -func have4Overlap16(astart, alast, bstart, blast int64) bool { +func have4Overlap16(astart, alast, bstart, blast int) bool { if alast+1 <= bstart { return false } return blast+1 > astart } -func intersectWithLeftover16(astart, alast, bstart, blast int64) (isOverlap, isLeftoverA, isLeftoverB bool, leftoverstart int64, intersection interval16) { +func intersectWithLeftover16(astart, alast, bstart, blast int) (isOverlap, isLeftoverA, isLeftoverB bool, leftoverstart int, intersection interval16) { if !have4Overlap16(astart, alast, bstart, blast) { return } @@ -1457,17 +1421,13 @@ func intersectWithLeftover16(astart, alast, bstart, blast int64) (isOverlap, isL return } -func (rc *runContainer16) findNextIntervalThatIntersectsStartingFrom(startIndex int64, key int64) (index int64, done bool) { - - rc.myOpts.startIndex = startIndex - rc.myOpts.endxIndex = 0 - - w, _, _ := rc.search(key, &rc.myOpts) +func (rc *runContainer16) findNextIntervalThatIntersectsStartingFrom(startIndex int, key int) (index int, done bool) { + w, _, _ := rc.searchRange(key, startIndex, 0) // rc.search always returns w < len(rc.iv) if w < startIndex { // not found and comes before lower bound startIndex, // so just use the lower bound. - if startIndex == int64(len(rc.iv)) { + if startIndex == int(len(rc.iv)) { // also this bump up means that we are done return startIndex, true } @@ -1485,25 +1445,6 @@ func sliceToString16(m []interval16) string { return s } -// selectInt16 returns the j-th value in the container. -// We panic of j is out of bounds. 
-func (rc *runContainer16) selectInt16(j uint16) int { - n := rc.cardinality() - if int64(j) > n { - panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n)) - } - - var offset int64 - for k := range rc.iv { - nextOffset := offset + rc.iv[k].runlen() - if nextOffset > int64(j) { - return int(int64(rc.iv[k].start) + (int64(j) - offset)) - } - offset = nextOffset - } - panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n)) -} - // helper for invert func (rc *runContainer16) invertlastInterval(origin uint16, lastIdx int) []interval16 { cur := rc.iv[lastIdx] @@ -1535,7 +1476,7 @@ func (rc *runContainer16) invert() *runContainer16 { case 1: return &runContainer16{iv: rc.invertlastInterval(0, 0)} } - var invstart int64 + var invstart int ult := ni - 1 for i, cur := range rc.iv { if i == ult { @@ -1554,7 +1495,7 @@ func (rc *runContainer16) invert() *runContainer16 { if cur.start > 0 { m = append(m, newInterval16Range(uint16(invstart), cur.start-1)) } - invstart = int64(cur.last() + 1) + invstart = int(cur.last() + 1) } return &runContainer16{iv: m} } @@ -1567,7 +1508,7 @@ func (iv interval16) isSuperSetOf(b interval16) bool { return iv.start <= b.start && b.last() <= iv.last() } -func (iv interval16) subtractInterval(del interval16) (left []interval16, delcount int64) { +func (iv interval16) subtractInterval(del interval16) (left []interval16, delcount int) { isect, isEmpty := intersectInterval16s(iv, del) if isEmpty { @@ -1592,7 +1533,7 @@ func (iv interval16) subtractInterval(del interval16) (left []interval16, delcou func (rc *runContainer16) isubtract(del interval16) { origiv := make([]interval16, len(rc.iv)) copy(origiv, rc.iv) - n := int64(len(rc.iv)) + n := int(len(rc.iv)) if n == 0 { return // already done. } @@ -1603,9 +1544,8 @@ func (rc *runContainer16) isubtract(del interval16) { } // INVAR there is some intersection between rc and del - istart, startAlready, _ := rc.search(int64(del.start), nil) - ilast, lastAlready, _ := rc.search(int64(del.last()), nil) - rc.card = -1 + istart, startAlready, _ := rc.search(int(del.start)) + ilast, lastAlready, _ := rc.search(int(del.last())) if istart == -1 { if ilast == n-1 && !lastAlready { rc.iv = nil @@ -1620,8 +1560,8 @@ func (rc *runContainer16) isubtract(del interval16) { // would overwrite values in iv b/c res0 can have len 2. so // write to origiv instead. lost := 1 + ilast - istart - changeSize := int64(len(res0)) - lost - newSize := int64(len(rc.iv)) + changeSize + changeSize := int(len(res0)) - lost + newSize := int(len(rc.iv)) + changeSize // rc.iv = append(pre, caboose...) // return @@ -1629,19 +1569,19 @@ func (rc *runContainer16) isubtract(del interval16) { if ilast != istart { res1, _ := rc.iv[ilast].subtractInterval(del) res0 = append(res0, res1...) - changeSize = int64(len(res0)) - lost - newSize = int64(len(rc.iv)) + changeSize + changeSize = int(len(res0)) - lost + newSize = int(len(rc.iv)) + changeSize } switch { case changeSize < 0: // shrink - copy(rc.iv[istart+int64(len(res0)):], rc.iv[ilast+1:]) - copy(rc.iv[istart:istart+int64(len(res0))], res0) + copy(rc.iv[istart+int(len(res0)):], rc.iv[ilast+1:]) + copy(rc.iv[istart:istart+int(len(res0))], res0) rc.iv = rc.iv[:newSize] return case changeSize == 0: // stay the same - copy(rc.iv[istart:istart+int64(len(res0))], res0) + copy(rc.iv[istart:istart+int(len(res0))], res0) return default: // changeSize > 0 is only possible when ilast == istart. 
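subtractInterval above returns the surviving fragments plus a delete count, and isubtract's changeSize bookkeeping exists precisely because there can be zero, one, or two fragments. A self-contained sketch of that shape (interval stands in for interval16):

package main

import "fmt"

type interval struct{ start, last int }

// subtract removes del from iv, returning the surviving fragments (zero,
// one, or two of them) and how many values were deleted.
func subtract(iv, del interval) (left []interval, deleted int) {
	lo, hi := del.start, del.last
	if lo < iv.start {
		lo = iv.start
	}
	if hi > iv.last {
		hi = iv.last
	}
	if lo > hi {
		return []interval{iv}, 0 // disjoint: nothing removed
	}
	if iv.start < lo {
		left = append(left, interval{iv.start, lo - 1}) // fragment below del
	}
	if hi < iv.last {
		left = append(left, interval{hi + 1, iv.last}) // fragment above del
	}
	return left, hi - lo + 1
}

func main() {
	fmt.Println(subtract(interval{0, 9}, interval{3, 5})) // [{0 2} {6 9}] 3
	fmt.Println(subtract(interval{0, 9}, interval{0, 9})) // [] 10
}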
@@ -1698,7 +1638,7 @@ func (rc *runContainer16) isubtract(del interval16) { // INVAR: ilast < n-1 lost := ilast - istart changeSize := -lost - newSize := int64(len(rc.iv)) + changeSize + newSize := int(len(rc.iv)) + changeSize if changeSize != 0 { copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:]) } @@ -1715,8 +1655,8 @@ func (rc *runContainer16) isubtract(del interval16) { rc.iv[istart] = res0[0] } lost := 1 + (ilast - istart) - changeSize := int64(len(res0)) - lost - newSize := int64(len(rc.iv)) + changeSize + changeSize := int(len(res0)) - lost + newSize := int(len(rc.iv)) + changeSize if changeSize != 0 { copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:]) } @@ -1727,8 +1667,8 @@ func (rc *runContainer16) isubtract(del interval16) { // we can only shrink or stay the same size res1, _ := rc.iv[ilast].subtractInterval(del) lost := ilast - istart - changeSize := int64(len(res1)) - lost - newSize := int64(len(rc.iv)) + changeSize + changeSize := int(len(res1)) - lost + newSize := int(len(rc.iv)) + changeSize if changeSize != 0 { // move the tail first to make room for res1 copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:]) @@ -1932,8 +1872,6 @@ func (rc *runContainer16) iand(a container) container { } func (rc *runContainer16) inplaceIntersect(rc2 *runContainer16) container { - // TODO: optimize by doing less allocation, possibly? - // sect will be new sect := rc.intersect(rc2) *rc = *sect return rc @@ -1987,17 +1925,18 @@ func (rc *runContainer16) andNot(a container) container { panic("unsupported container type") } -func (rc *runContainer16) fillLeastSignificant16bits(x []uint32, i int, mask uint32) { - k := 0 - var val int64 +func (rc *runContainer16) fillLeastSignificant16bits(x []uint32, i int, mask uint32) int { + k := i + var val int for _, p := range rc.iv { n := p.runlen() - for j := int64(0); j < n; j++ { - val = int64(p.start) + j - x[k+i] = uint32(val) | mask + for j := int(0); j < n; j++ { + val = int(p.start) + j + x[k] = uint32(val) | mask k++ } } + return k } func (rc *runContainer16) getShortIterator() shortPeekable { @@ -2016,8 +1955,11 @@ func (rc *runContainer16) getManyIterator() manyIterable { // is still abe to express 2^16 because it is an int not an uint16. 
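One behavioral change in the fillLeastSignificant16bits hunk above: it now starts writing at index i and returns the next free index, so callers can chain fills across containers without recomputing offsets. A simplified sketch of that contract (standalone, assumed types; not the roaring API):

```go
package main

import "fmt"

type run struct{ start, length uint16 } // inclusive run: start..start+length

// fill writes every value of every run into x starting at index i, OR-ing
// in the container's high bits via mask, and returns the next write
// position, matching the new return-k contract of the hunk above.
func fill(x []uint32, i int, mask uint32, runs []run) int {
	k := i
	for _, p := range runs {
		for v := int(p.start); v <= int(p.start)+int(p.length); v++ {
			x[k] = uint32(v) | mask
			k++
		}
	}
	return k
}

func main() {
	x := make([]uint32, 6)
	k := fill(x, 0, 1<<16, []run{{2, 3}}) // values 2..5 under high bits 1
	k = fill(x, k, 2<<16, []run{{0, 1}})  // then 0..1 under high bits 2
	fmt.Println(x[:k]) // [65538 65539 65540 65541 131072 131073]
}
```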
func (rc *runContainer16) iaddRange(firstOfRange, endx int) container { - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange", endx)) + if firstOfRange > endx { + panic(fmt.Sprintf("invalid %v = endx > firstOfRange", endx)) + } + if firstOfRange == endx { + return rc } addme := newRunContainer16TakeOwnership([]interval16{ { @@ -2031,10 +1973,13 @@ func (rc *runContainer16) iaddRange(firstOfRange, endx int) container { // remove the values in the range [firstOfRange,endx) func (rc *runContainer16) iremoveRange(firstOfRange, endx int) container { - if firstOfRange >= endx { + if firstOfRange > endx { panic(fmt.Sprintf("request to iremove empty set [%v, %v),"+ " nothing to do.", firstOfRange, endx)) - //return rc + } + // empty removal + if firstOfRange == endx { + return rc } x := newInterval16Range(uint16(firstOfRange), uint16(endx-1)) rc.isubtract(x) @@ -2043,8 +1988,8 @@ func (rc *runContainer16) iremoveRange(firstOfRange, endx int) container { // not flip the values in the range [firstOfRange,endx) func (rc *runContainer16) not(firstOfRange, endx int) container { - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange)) + if firstOfRange > endx { + panic(fmt.Sprintf("invalid %v = endx > firstOfRange = %v", endx, firstOfRange)) } return rc.Not(firstOfRange, endx) @@ -2064,8 +2009,8 @@ func (rc *runContainer16) not(firstOfRange, endx int) container { // func (rc *runContainer16) Not(firstOfRange, endx int) *runContainer16 { - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange == %v", endx, firstOfRange)) + if firstOfRange > endx { + panic(fmt.Sprintf("invalid %v = endx > firstOfRange == %v", endx, firstOfRange)) } if firstOfRange >= endx { @@ -2203,9 +2148,21 @@ func (rc *runContainer16) orBitmapContainerCardinality(bc *bitmapContainer) int // orArray finds the union of rc and ac. func (rc *runContainer16) orArray(ac *arrayContainer) container { - bc1 := newBitmapContainerFromRun(rc) - bc2 := ac.toBitmapContainer() - return bc1.orBitmap(bc2) + if ac.isEmpty() { + return rc.clone() + } + if rc.isEmpty() { + return ac.clone() + } + intervals, cardMinusOne := runArrayUnionToRuns(rc, ac) + result := newRunContainer16TakeOwnership(intervals) + if len(intervals) >= 2048 && cardMinusOne >= arrayDefaultMaxSize { + return newBitmapContainerFromRun(result) + } + if len(intervals)*2 > 1+int(cardMinusOne) { + return result.toArrayContainer() + } + return result } // orArray finds the union of rc and ac. @@ -2230,8 +2187,8 @@ func (rc *runContainer16) ior(a container) container { func (rc *runContainer16) inplaceUnion(rc2 *runContainer16) container { for _, p := range rc2.iv { - last := int64(p.last()) - for i := int64(p.start); i <= last; i++ { + last := int(p.last()) + for i := int(p.start); i <= last; i++ { rc.Add(uint16(i)) } } @@ -2248,13 +2205,88 @@ func (rc *runContainer16) iorBitmapContainer(bc *bitmapContainer) container { } func (rc *runContainer16) iorArray(ac *arrayContainer) container { - it := ac.getShortIterator() - for it.hasNext() { - rc.Add(it.next()) + if rc.isEmpty() { + return ac.clone() + } + if ac.isEmpty() { + return rc + } + var cardMinusOne uint16 + //TODO: perform the union algorithm in-place using rc.iv + // this can be done with methods like the in-place array container union + // but maybe lazily moving the remaining elements back. 
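Two things to note in the hunks above: iaddRange, iremoveRange, and not now treat an empty range (firstOfRange == endx) as a no-op instead of panicking, and orArray no longer round-trips through bitmaps unconditionally; it builds runs and then picks the cheapest representation. A sketch of that selection heuristic, stated in terms of cardinality rather than cardMinusOne (arrayDefaultMaxSize is roaring's usual 4096; labels are illustrative):

```go
package main

import "fmt"

const arrayDefaultMaxSize = 4096 // roaring's array-container threshold

// chooseContainer mirrors the post-union decision in orArray above:
// given the run count and cardinality of a union result, pick the
// cheapest representation.
func chooseContainer(numRuns, card int) string {
	if numRuns >= 2048 && card > arrayDefaultMaxSize {
		return "bitmap" // 2048 runs is 8KB of intervals, a bitmap's size
	}
	if numRuns*2 > card {
		return "array" // fewer uint16s as a sorted array than as runs
	}
	return "run"
}

func main() {
	fmt.Println(chooseContainer(3000, 60000)) // bitmap
	fmt.Println(chooseContainer(100, 150))    // array
	fmt.Println(chooseContainer(10, 50000))   // run
}
```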
+ rc.iv, cardMinusOne = runArrayUnionToRuns(rc, ac) + if len(rc.iv) >= 2048 && cardMinusOne >= arrayDefaultMaxSize { + return newBitmapContainerFromRun(rc) + } + if len(rc.iv)*2 > 1+int(cardMinusOne) { + return rc.toArrayContainer() } return rc } +func runArrayUnionToRuns(rc *runContainer16, ac *arrayContainer) ([]interval16, uint16) { + pos1 := 0 + pos2 := 0 + length1 := len(ac.content) + length2 := len(rc.iv) + target := make([]interval16, 0, len(rc.iv)) + // have to find the first range + // options are + // 1. from array container + // 2. from run container + var previousInterval interval16 + var cardMinusOne uint16 + if ac.content[0] < rc.iv[0].start { + previousInterval.start = ac.content[0] + previousInterval.length = 0 + pos1++ + } else { + previousInterval.start = rc.iv[0].start + previousInterval.length = rc.iv[0].length + pos2++ + } + + for pos1 < length1 || pos2 < length2 { + if pos1 < length1 { + s1 := ac.content[pos1] + if s1 <= previousInterval.start+previousInterval.length { + pos1++ + continue + } + if previousInterval.last() < MaxUint16 && previousInterval.last()+1 == s1 { + previousInterval.length++ + pos1++ + continue + } + } + if pos2 < length2 { + range2 := rc.iv[pos2] + if range2.start <= previousInterval.last() || range2.start > 0 && range2.start-1 == previousInterval.last() { + pos2++ + if previousInterval.last() < range2.last() { + previousInterval.length = range2.last() - previousInterval.start + } + continue + } + } + cardMinusOne += previousInterval.length + 1 + target = append(target, previousInterval) + if pos2 == length2 || pos1 < length1 && ac.content[pos1] < rc.iv[pos2].start { + previousInterval.start = ac.content[pos1] + previousInterval.length = 0 + pos1++ + } else { + previousInterval = rc.iv[pos2] + pos2++ + } + } + cardMinusOne += previousInterval.length + 1 + target = append(target, previousInterval) + + return target, cardMinusOne +} + // lazyIOR is described (not yet implemented) in // this nice note from @lemire on // https://github.com/RoaringBitmap/roaring/pull/70#issuecomment-263613737 @@ -2310,9 +2342,9 @@ func (rc *runContainer16) lazyOR(a container) container { } func (rc *runContainer16) intersects(a container) bool { - // TODO: optimize by doing inplace/less allocation, possibly? + // TODO: optimize by doing inplace/less allocation isect := rc.and(a) - return isect.getCardinality() > 0 + return !isect.isEmpty() } func (rc *runContainer16) xor(a container) container { @@ -2341,44 +2373,51 @@ func (rc *runContainer16) iandNot(a container) container { // flip the values in the range [firstOfRange,endx) func (rc *runContainer16) inot(firstOfRange, endx int) container { - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange)) + if firstOfRange > endx { + panic(fmt.Sprintf("invalid %v = endx > firstOfRange = %v", endx, firstOfRange)) + } + if firstOfRange > endx { + return rc } // TODO: minimize copies, do it all inplace; not() makes a copy. 
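runArrayUnionToRuns above merges a sorted array and a run sequence with two cursors, extending previousInterval whenever the next candidate overlaps or directly abuts it. It reports cardinality minus one because a full container's cardinality (65536) does not fit in uint16; a tiny illustration of the recovery step its callers perform:

```go
package main

import "fmt"

func main() {
	// A full container holds 65536 values, one more than uint16 can
	// count; card-1 (65535) always fits, and callers recover the real
	// cardinality as 1+int(cardMinusOne), as the hunks above do.
	var cardMinusOne uint16 = 65535 // a full [0, 65535] container
	fmt.Println(int(cardMinusOne) + 1) // 65536
}
```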
rc = rc.Not(firstOfRange, endx) return rc } -func (rc *runContainer16) getCardinality() int { - return int(rc.cardinality()) -} - func (rc *runContainer16) rank(x uint16) int { - n := int64(len(rc.iv)) - xx := int64(x) - w, already, _ := rc.search(xx, nil) + n := int(len(rc.iv)) + xx := int(x) + w, already, _ := rc.search(xx) if w < 0 { return 0 } if !already && w == n-1 { return rc.getCardinality() } - var rnk int64 + var rnk int if !already { - for i := int64(0); i <= w; i++ { + for i := int(0); i <= w; i++ { rnk += rc.iv[i].runlen() } return int(rnk) } - for i := int64(0); i < w; i++ { + for i := int(0); i < w; i++ { rnk += rc.iv[i].runlen() } - rnk += int64(x-rc.iv[w].start) + 1 + rnk += int(x-rc.iv[w].start) + 1 return int(rnk) } func (rc *runContainer16) selectInt(x uint16) int { - return rc.selectInt16(x) + var offset int + for k := range rc.iv { + nextOffset := offset + rc.iv[k].runlen() + if nextOffset > int(x) { + return int(int(rc.iv[k].start) + (int(x) - offset)) + } + offset = nextOffset + } + panic("cannot select x") } func (rc *runContainer16) andNotRunContainer16(b *runContainer16) container { @@ -2456,11 +2495,9 @@ func (rc *runContainer16) xorBitmap(bc *bitmapContainer) container { // convert to bitmap or array *if needed* func (rc *runContainer16) toEfficientContainer() container { - - // runContainer16SerializedSizeInBytes(numRuns) sizeAsRunContainer := rc.getSizeInBytes() sizeAsBitmapContainer := bitmapContainerSizeInBytes() - card := int(rc.cardinality()) + card := rc.getCardinality() sizeAsArrayContainer := arrayContainerSizeInBytes(card) if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) { return rc diff --git a/vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go deleted file mode 100644 index 84537d0..0000000 --- a/vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go +++ /dev/null @@ -1,1104 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *addHelper16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbai uint32 - zbai, err = dc.ReadMapHeader() - if err != nil { - return - } - for zbai > 0 { - zbai-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "runstart": - z.runstart, err = dc.ReadUint16() - if err != nil { - return - } - case "runlen": - z.runlen, err = dc.ReadUint16() - if err != nil { - return - } - case "actuallyAdded": - z.actuallyAdded, err = dc.ReadUint16() - if err != nil { - return - } - case "m": - var zcmr uint32 - zcmr, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.m) >= int(zcmr) { - z.m = (z.m)[:zcmr] - } else { - z.m = make([]interval16, zcmr) - } - for zxvk := range z.m { - var zajw uint32 - zajw, err = dc.ReadMapHeader() - if err != nil { - return - } - for zajw > 0 { - zajw-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.m[zxvk].start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.m[zxvk].length, err = dc.ReadUint16() - z.m[zxvk].length -= z.m[zxvk].start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "rc": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - return - } - 
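Before the generated-code deletion above gets underway, the rank/select hunks inline the removed selectInt16 into selectInt. A standalone sketch of that walk (hypothetical helper; it returns ok instead of panicking when x is out of range):

```go
package main

import "fmt"

type run struct{ start, length uint16 } // inclusive run: start..start+length

// selectRank returns the x-th smallest value across the runs, the same
// walk the inlined selectInt performs: skip whole runs by their length
// until the x-th value falls inside one.
func selectRank(runs []run, x int) (int, bool) {
	offset := 0
	for _, r := range runs {
		runlen := int(r.length) + 1
		if offset+runlen > x {
			return int(r.start) + (x - offset), true
		}
		offset += runlen
	}
	return 0, false // x >= cardinality
}

func main() {
	runs := []run{{2, 3}, {10, 0}} // values 2,3,4,5 and 10
	fmt.Println(selectRank(runs, 0)) // 2 true
	fmt.Println(selectRank(runs, 4)) // 10 true
	fmt.Println(selectRank(runs, 5)) // 0 false
}
```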
z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - var zwht uint32 - zwht, err = dc.ReadMapHeader() - if err != nil { - return - } - for zwht > 0 { - zwht-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zhct uint32 - zhct, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.rc.iv) >= int(zhct) { - z.rc.iv = (z.rc.iv)[:zhct] - } else { - z.rc.iv = make([]interval16, zhct) - } - for zbzg := range z.rc.iv { - var zcua uint32 - zcua, err = dc.ReadMapHeader() - if err != nil { - return - } - for zcua > 0 { - zcua-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.rc.iv[zbzg].start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.rc.iv[zbzg].length, err = dc.ReadUint16() - z.rc.iv[zbzg].length -= z.rc.iv[zbzg].start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "card": - z.rc.card, err = dc.ReadInt64() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *addHelper16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 5 - // write "runstart" - err = en.Append(0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.runstart) - if err != nil { - return - } - // write "runlen" - err = en.Append(0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e) - if err != nil { - return err - } - err = en.WriteUint16(z.runlen) - if err != nil { - return - } - // write "actuallyAdded" - err = en.Append(0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64) - if err != nil { - return err - } - err = en.WriteUint16(z.actuallyAdded) - if err != nil { - return - } - // write "m" - err = en.Append(0xa1, 0x6d) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.m))) - if err != nil { - return - } - for zxvk := range z.m { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.m[zxvk].start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.m[zxvk].last()) - if err != nil { - return - } - } - // write "rc" - err = en.Append(0xa2, 0x72, 0x63) - if err != nil { - return err - } - if z.rc == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - // map header, size 2 - // write "iv" - err = en.Append(0x82, 0xa2, 0x69, 0x76) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.rc.iv))) - if err != nil { - return - } - for zbzg := range z.rc.iv { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.rc.iv[zbzg].start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.rc.iv[zbzg].last()) - if err != nil { - return - } - } - // write "card" - err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64) - if err != nil { - return err - } - err = en.WriteInt64(z.rc.card) - if err != nil { - return - } - } 
- return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *addHelper16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 5 - // string "runstart" - o = append(o, 0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.runstart) - // string "runlen" - o = append(o, 0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e) - o = msgp.AppendUint16(o, z.runlen) - // string "actuallyAdded" - o = append(o, 0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64) - o = msgp.AppendUint16(o, z.actuallyAdded) - // string "m" - o = append(o, 0xa1, 0x6d) - o = msgp.AppendArrayHeader(o, uint32(len(z.m))) - for zxvk := range z.m { - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.m[zxvk].start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.m[zxvk].last()) - } - // string "rc" - o = append(o, 0xa2, 0x72, 0x63) - if z.rc == nil { - o = msgp.AppendNil(o) - } else { - // map header, size 2 - // string "iv" - o = append(o, 0x82, 0xa2, 0x69, 0x76) - o = msgp.AppendArrayHeader(o, uint32(len(z.rc.iv))) - for zbzg := range z.rc.iv { - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.rc.iv[zbzg].start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.rc.iv[zbzg].last()) - } - // string "card" - o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64) - o = msgp.AppendInt64(o, z.rc.card) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *addHelper16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zxhx uint32 - zxhx, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zxhx > 0 { - zxhx-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "runstart": - z.runstart, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "runlen": - z.runlen, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "actuallyAdded": - z.actuallyAdded, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "m": - var zlqf uint32 - zlqf, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.m) >= int(zlqf) { - z.m = (z.m)[:zlqf] - } else { - z.m = make([]interval16, zlqf) - } - for zxvk := range z.m { - var zdaf uint32 - zdaf, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zdaf > 0 { - zdaf-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.m[zxvk].start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.m[zxvk].length, bts, err = msgp.ReadUint16Bytes(bts) - z.m[zxvk].length -= z.m[zxvk].start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "rc": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - var zpks uint32 - zpks, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zpks > 0 { - zpks-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch 
msgp.UnsafeString(field) { - case "iv": - var zjfb uint32 - zjfb, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.rc.iv) >= int(zjfb) { - z.rc.iv = (z.rc.iv)[:zjfb] - } else { - z.rc.iv = make([]interval16, zjfb) - } - for zbzg := range z.rc.iv { - var zcxo uint32 - zcxo, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zcxo > 0 { - zcxo-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.rc.iv[zbzg].start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.rc.iv[zbzg].length, bts, err = msgp.ReadUint16Bytes(bts) - z.rc.iv[zbzg].length -= z.rc.iv[zbzg].start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "card": - z.rc.card, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *addHelper16) Msgsize() (s int) { - s = 1 + 9 + msgp.Uint16Size + 7 + msgp.Uint16Size + 14 + msgp.Uint16Size + 2 + msgp.ArrayHeaderSize + (len(z.m) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 3 - if z.rc == nil { - s += msgp.NilSize - } else { - s += 1 + 3 + msgp.ArrayHeaderSize + (len(z.rc.iv) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 5 + msgp.Int64Size - } - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *interval16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zeff uint32 - zeff, err = dc.ReadMapHeader() - if err != nil { - return - } - for zeff > 0 { - zeff-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.length, err = dc.ReadUint16() - z.length = -z.start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z interval16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.last()) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z interval16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.last()) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *interval16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zrsw uint32 - zrsw, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zrsw > 0 { - zrsw-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { 
- case "start": - z.start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.length, bts, err = msgp.ReadUint16Bytes(bts) - z.length -= z.start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z interval16) Msgsize() (s int) { - s = 1 + 6 + msgp.Uint16Size + 5 + msgp.Uint16Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *runContainer16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zdnj uint32 - zdnj, err = dc.ReadMapHeader() - if err != nil { - return - } - for zdnj > 0 { - zdnj-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zobc uint32 - zobc, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.iv) >= int(zobc) { - z.iv = (z.iv)[:zobc] - } else { - z.iv = make([]interval16, zobc) - } - for zxpk := range z.iv { - var zsnv uint32 - zsnv, err = dc.ReadMapHeader() - if err != nil { - return - } - for zsnv > 0 { - zsnv-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.iv[zxpk].start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.iv[zxpk].length, err = dc.ReadUint16() - z.iv[zxpk].length -= z.iv[zxpk].start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "card": - z.card, err = dc.ReadInt64() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *runContainer16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "iv" - err = en.Append(0x82, 0xa2, 0x69, 0x76) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.iv))) - if err != nil { - return - } - for zxpk := range z.iv { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.iv[zxpk].start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.iv[zxpk].last()) - if err != nil { - return - } - } - // write "card" - err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64) - if err != nil { - return err - } - err = en.WriteInt64(z.card) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *runContainer16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "iv" - o = append(o, 0x82, 0xa2, 0x69, 0x76) - o = msgp.AppendArrayHeader(o, uint32(len(z.iv))) - for zxpk := range z.iv { - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.iv[zxpk].start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.iv[zxpk].last()) - } - // string "card" - o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64) - o = msgp.AppendInt64(o, z.card) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *runContainer16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zkgt 
uint32 - zkgt, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zkgt > 0 { - zkgt-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zema uint32 - zema, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.iv) >= int(zema) { - z.iv = (z.iv)[:zema] - } else { - z.iv = make([]interval16, zema) - } - for zxpk := range z.iv { - var zpez uint32 - zpez, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zpez > 0 { - zpez-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.iv[zxpk].start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.iv[zxpk].length, bts, err = msgp.ReadUint16Bytes(bts) - z.iv[zxpk].length -= z.iv[zxpk].start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "card": - z.card, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *runContainer16) Msgsize() (s int) { - s = 1 + 3 + msgp.ArrayHeaderSize + (len(z.iv) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 5 + msgp.Int64Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *runIterator16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zqke uint32 - zqke, err = dc.ReadMapHeader() - if err != nil { - return - } - for zqke > 0 { - zqke-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "rc": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - err = z.rc.DecodeMsg(dc) - if err != nil { - return - } - } - case "curIndex": - z.curIndex, err = dc.ReadInt64() - if err != nil { - return - } - case "curPosInIndex": - z.curPosInIndex, err = dc.ReadUint16() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *runIterator16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "rc" - err = en.Append(0x83, 0xa2, 0x72, 0x63) - if err != nil { - return err - } - if z.rc == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = z.rc.EncodeMsg(en) - if err != nil { - return - } - } - // write "curIndex" - err = en.Append(0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78) - if err != nil { - return err - } - err = en.WriteInt64(z.curIndex) - if err != nil { - return - } - // write "curPosInIndex" - err = en.Append(0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78) - if err != nil { - return err - } - err = en.WriteUint16(z.curPosInIndex) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *runIterator16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "rc" - o = append(o, 0x83, 0xa2, 0x72, 0x63) - if z.rc == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.rc.MarshalMsg(o) - if err != nil { - return - } - } - // string "curIndex" - 
o = append(o, 0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78) - o = msgp.AppendInt64(o, z.curIndex) - // string "curPosInIndex" - o = append(o, 0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78) - o = msgp.AppendUint16(o, z.curPosInIndex) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *runIterator16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zqyh uint32 - zqyh, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zqyh > 0 { - zqyh-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "rc": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - bts, err = z.rc.UnmarshalMsg(bts) - if err != nil { - return - } - } - case "curIndex": - z.curIndex, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - case "curPosInIndex": - z.curPosInIndex, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *runIterator16) Msgsize() (s int) { - s = 1 + 3 - if z.rc == nil { - s += msgp.NilSize - } else { - s += z.rc.Msgsize() - } - s += 9 + msgp.Int64Size + 14 + msgp.Uint16Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *uint16Slice) DecodeMsg(dc *msgp.Reader) (err error) { - var zjpj uint32 - zjpj, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap((*z)) >= int(zjpj) { - (*z) = (*z)[:zjpj] - } else { - (*z) = make(uint16Slice, zjpj) - } - for zywj := range *z { - (*z)[zywj], err = dc.ReadUint16() - if err != nil { - return - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z uint16Slice) EncodeMsg(en *msgp.Writer) (err error) { - err = en.WriteArrayHeader(uint32(len(z))) - if err != nil { - return - } - for zzpf := range z { - err = en.WriteUint16(z[zzpf]) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z uint16Slice) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - o = msgp.AppendArrayHeader(o, uint32(len(z))) - for zzpf := range z { - o = msgp.AppendUint16(o, z[zzpf]) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *uint16Slice) UnmarshalMsg(bts []byte) (o []byte, err error) { - var zgmo uint32 - zgmo, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap((*z)) >= int(zgmo) { - (*z) = (*z)[:zgmo] - } else { - (*z) = make(uint16Slice, zgmo) - } - for zrfe := range *z { - (*z)[zrfe], bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z uint16Slice) Msgsize() (s int) { - s = msgp.ArrayHeaderSize + (len(z) * (msgp.Uint16Size)) - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization.go b/vendor/github.com/RoaringBitmap/roaring/serialization.go index 7b7ed29..70e3bbc 100644 --- a/vendor/github.com/RoaringBitmap/roaring/serialization.go +++ b/vendor/github.com/RoaringBitmap/roaring/serialization.go @@ -3,8 +3,6 @@ package roaring 
import ( "encoding/binary" "io" - - "github.com/tinylib/msgp/msgp" ) // writeTo for runContainer16 follows this @@ -19,16 +17,3 @@ func (b *runContainer16) writeTo(stream io.Writer) (int, error) { } return stream.Write(buf) } - -func (b *runContainer16) writeToMsgpack(stream io.Writer) (int, error) { - bts, err := b.MarshalMsg(nil) - if err != nil { - return 0, err - } - return stream.Write(bts) -} - -func (b *runContainer16) readFromMsgpack(stream io.Reader) (int, error) { - err := msgp.Decode(stream, b) - return 0, err -} diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go index 82edeb8..221e173 100644 --- a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go +++ b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go @@ -3,6 +3,7 @@ package roaring import ( + "encoding/binary" "errors" "io" "reflect" @@ -132,3 +133,285 @@ func byteSliceAsInterval16Slice(slice []byte) (result []interval16) { // return result return } + +// FromBuffer creates a bitmap from its serialized version stored in buffer. +// It uses CRoaring's frozen bitmap format. +// +// The format specification is available here: +// https://github.com/RoaringBitmap/CRoaring/blob/2c867e9f9c9e2a3a7032791f94c4c7ae3013f6e0/src/roaring.c#L2756-L2783 +// +// The provided byte array (buf) is expected to be a constant. +// The function makes the best effort attempt not to copy data. +// Only little endian is supported. The function will err if it detects a big +// endian serialized file. +// You should take care not to modify buff as it will likely result in +// unexpected program behavior. +// If said buffer comes from a memory map, it's advisable to give it read +// only permissions, either at creation or by calling Mprotect from the +// golang.org/x/sys/unix package. +// +// Resulting bitmaps are effectively immutable in the following sense: +// a copy-on-write marker is used so that when you modify the resulting +// bitmap, copies of selected data (containers) are made. +// You should *not* change the copy-on-write status of the resulting +// bitmaps (SetCopyOnWrite). +// +// If buf becomes unavailable, then a bitmap created with +// FromBuffer would be effectively broken. Furthermore, any +// bitmap derived from this bitmap (e.g., via Or, And) might +// also be broken. Thus, before making buf unavailable, you should +// call CloneCopyOnWriteContainers on all such bitmaps. +// +func (rb *Bitmap) FrozenView(buf []byte) error { + return rb.highlowcontainer.frozenView(buf) +} + +/* Verbatim specification from CRoaring. + * + * FROZEN SERIALIZATION FORMAT DESCRIPTION + * + * -- (beginning must be aligned by 32 bytes) -- + * uint64_t[BITSET_CONTAINER_SIZE_IN_WORDS * num_bitset_containers] + * rle16_t[total number of rle elements in all run containers] + * uint16_t[total number of array elements in all array containers] + * uint16_t[num_containers] + * uint16_t[num_containers] + * uint8_t[num_containers] + *
<header> uint32_t + * + * <header> is a 4-byte value which is a bit union of FROZEN_COOKIE (15 bits) + * and the number of containers (17 bits). + * + * <counts> stores number of elements for every container. + * Its meaning depends on container type. + * For array and bitset containers, this value is the container cardinality minus one. + * For run container, it is the number of rle_t elements (n_runs). + * + * <bitset_data>, <run_data>, <array_data> are flat arrays of elements of + * all containers of respective type. + * + * <*_data> and <keys> are kept close together because they are not accessed + * during deserialization. This may reduce IO in case of large mmaped bitmaps. + * All members have their native alignments during deserialization except <header>
, + * which is not guaranteed to be aligned by 4 bytes. + */ +const FROZEN_COOKIE = 13766 + +var ( + FrozenBitmapInvalidCookie = errors.New("header does not contain the FROZEN_COOKIE") + FrozenBitmapBigEndian = errors.New("loading big endian frozen bitmaps is not supported") + FrozenBitmapIncomplete = errors.New("input buffer too small to contain a frozen bitmap") + FrozenBitmapOverpopulated = errors.New("too many containers") + FrozenBitmapUnexpectedData = errors.New("spurious data in input") + FrozenBitmapInvalidTypecode = errors.New("unrecognized typecode") + FrozenBitmapBufferTooSmall = errors.New("buffer too small") +) + +func (ra *roaringArray) frozenView(buf []byte) error { + if len(buf) < 4 { + return FrozenBitmapIncomplete + } + + headerBE := binary.BigEndian.Uint32(buf[len(buf)-4:]) + if headerBE & 0x7fff == FROZEN_COOKIE { + return FrozenBitmapBigEndian + } + + header := binary.LittleEndian.Uint32(buf[len(buf)-4:]) + buf = buf[:len(buf)-4] + + if header & 0x7fff != FROZEN_COOKIE { + return FrozenBitmapInvalidCookie + } + + nCont := int(header >> 15) + if nCont > (1 << 16) { + return FrozenBitmapOverpopulated + } + + // 1 byte per type, 2 bytes per key, 2 bytes per count. + if len(buf) < 5*nCont { + return FrozenBitmapIncomplete + } + + types := buf[len(buf)-nCont:] + buf = buf[:len(buf)-nCont] + + counts := byteSliceAsUint16Slice(buf[len(buf)-2*nCont:]) + buf = buf[:len(buf)-2*nCont] + + keys := byteSliceAsUint16Slice(buf[len(buf)-2*nCont:]) + buf = buf[:len(buf)-2*nCont] + + nBitmap, nArray, nRun := uint64(0), uint64(0), uint64(0) + nArrayEl, nRunEl := uint64(0), uint64(0) + for i, t := range types { + switch (t) { + case 1: + nBitmap++ + case 2: + nArray++ + nArrayEl += uint64(counts[i])+1 + case 3: + nRun++ + nRunEl += uint64(counts[i]) + default: + return FrozenBitmapInvalidTypecode + } + } + + if uint64(len(buf)) < (1 << 13)*nBitmap + 4*nRunEl + 2*nArrayEl { + return FrozenBitmapIncomplete + } + + bitsetsArena := byteSliceAsUint64Slice(buf[:(1 << 13)*nBitmap]) + buf = buf[(1 << 13)*nBitmap:] + + runsArena := byteSliceAsInterval16Slice(buf[:4*nRunEl]) + buf = buf[4*nRunEl:] + + arraysArena := byteSliceAsUint16Slice(buf[:2*nArrayEl]) + buf = buf[2*nArrayEl:] + + if len(buf) != 0 { + return FrozenBitmapUnexpectedData + } + + // TODO: maybe arena_alloc all this. + containers := make([]container, nCont) + bitsets := make([]bitmapContainer, nBitmap) + arrays := make([]arrayContainer, nArray) + runs := make([]runContainer16, nRun) + needCOW := make([]bool, nCont) + + iBitset, iArray, iRun := uint64(0), uint64(0), uint64(0) + for i, t := range types { + needCOW[i] = true + + switch (t) { + case 1: + containers[i] = &bitsets[iBitset] + bitsets[iBitset].cardinality = int(counts[i])+1 + bitsets[iBitset].bitmap = bitsetsArena[:1024] + bitsetsArena = bitsetsArena[1024:] + iBitset++ + case 2: + containers[i] = &arrays[iArray] + sz := int(counts[i])+1 + arrays[iArray].content = arraysArena[:sz] + arraysArena = arraysArena[sz:] + iArray++ + case 3: + containers[i] = &runs[iRun] + runs[iRun].iv = runsArena[:counts[i]] + runsArena = runsArena[counts[i]:] + iRun++ + } + } + + // Not consuming the full input is a bug. 
+ if iBitset != nBitmap || len(bitsetsArena) != 0 || + iArray != nArray || len(arraysArena) != 0 || + iRun != nRun || len(runsArena) != 0 { + panic("we missed something") + } + + ra.keys = keys + ra.containers = containers + ra.needCopyOnWrite = needCOW + ra.copyOnWrite = true + + return nil +} + +func (bm *Bitmap) GetFrozenSizeInBytes() uint64 { + nBits, nArrayEl, nRunEl := uint64(0), uint64(0), uint64(0) + for _, c := range bm.highlowcontainer.containers { + switch v := c.(type) { + case *bitmapContainer: + nBits++ + case *arrayContainer: + nArrayEl += uint64(len(v.content)) + case *runContainer16: + nRunEl += uint64(len(v.iv)) + } + } + return 4 + 5*uint64(len(bm.highlowcontainer.containers)) + + (nBits << 13) + 2*nArrayEl + 4*nRunEl +} + +func (bm *Bitmap) Freeze() ([]byte, error) { + sz := bm.GetFrozenSizeInBytes() + buf := make([]byte, sz) + _, err := bm.FreezeTo(buf) + return buf, err +} + +func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { + containers := bm.highlowcontainer.containers + nCont := len(containers) + + nBits, nArrayEl, nRunEl := 0, 0, 0 + for _, c := range containers { + switch v := c.(type) { + case *bitmapContainer: + nBits++ + case *arrayContainer: + nArrayEl += len(v.content) + case *runContainer16: + nRunEl += len(v.iv) + } + } + + serialSize := 4 + 5*nCont + (1 << 13)*nBits + 4*nRunEl + 2*nArrayEl + if len(buf) < serialSize { + return 0, FrozenBitmapBufferTooSmall + } + + bitsArena := byteSliceAsUint64Slice(buf[:(1 << 13)*nBits]) + buf = buf[(1 << 13)*nBits:] + + runsArena := byteSliceAsInterval16Slice(buf[:4*nRunEl]) + buf = buf[4*nRunEl:] + + arraysArena := byteSliceAsUint16Slice(buf[:2*nArrayEl]) + buf = buf[2*nArrayEl:] + + keys := byteSliceAsUint16Slice(buf[:2*nCont]) + buf = buf[2*nCont:] + + counts := byteSliceAsUint16Slice(buf[:2*nCont]) + buf = buf[2*nCont:] + + types := buf[:nCont] + buf = buf[nCont:] + + header := uint32(FROZEN_COOKIE|(nCont << 15)) + binary.LittleEndian.PutUint32(buf[:4], header) + + copy(keys, bm.highlowcontainer.keys[:]) + + for i, c := range containers { + switch v := c.(type) { + case *bitmapContainer: + copy(bitsArena, v.bitmap) + bitsArena = bitsArena[1024:] + counts[i] = uint16(v.cardinality-1) + types[i] = 1 + case *arrayContainer: + copy(arraysArena, v.content) + arraysArena = arraysArena[len(v.content):] + elems := len(v.content) + counts[i] = uint16(elems-1) + types[i] = 2 + case *runContainer16: + copy(runsArena, v.iv) + runs := len(v.iv) + runsArena = runsArena[runs:] + counts[i] = uint16(runs) + types[i] = 3 + } + } + + return serialSize, nil +} diff --git a/vendor/github.com/RoaringBitmap/roaring/smat.go b/vendor/github.com/RoaringBitmap/roaring/smat.go index 9da4756..972cd24 100644 --- a/vendor/github.com/RoaringBitmap/roaring/smat.go +++ b/vendor/github.com/RoaringBitmap/roaring/smat.go @@ -63,7 +63,7 @@ import ( "sort" "github.com/mschoch/smat" - "github.com/willf/bitset" + "github.com/bits-and-blooms/bitset" ) // fuzz test using state machine driven by byte stream. 
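The frozen-format additions above give Bitmap a zero-copy load path. A usage sketch built only from the methods this patch introduces (Freeze, FrozenView), plus existing roaring constructors; note the buffer must outlive the view and must not be modified:

```go
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	rb := roaring.BitmapOf(1, 2, 3, 100000)

	// Freeze serializes into CRoaring's frozen format: container data
	// first, then keys/counts/typecodes, with the 4-byte header last.
	buf, err := rb.Freeze()
	if err != nil {
		panic(err)
	}

	// FrozenView points a bitmap at buf without copying the container
	// payloads; buf must stay alive and unmodified (copy-on-write only
	// protects against mutations made through the view itself).
	view := roaring.NewBitmap()
	if err := view.FrozenView(buf); err != nil {
		panic(err)
	}
	fmt.Println(view.GetCardinality()) // 4
}
```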
diff --git a/vendor/github.com/willf/bitset/.gitignore b/vendor/github.com/bits-and-blooms/bitset/.gitignore similarity index 100% rename from vendor/github.com/willf/bitset/.gitignore rename to vendor/github.com/bits-and-blooms/bitset/.gitignore diff --git a/vendor/github.com/willf/bitset/.travis.yml b/vendor/github.com/bits-and-blooms/bitset/.travis.yml similarity index 100% rename from vendor/github.com/willf/bitset/.travis.yml rename to vendor/github.com/bits-and-blooms/bitset/.travis.yml diff --git a/vendor/github.com/willf/bitset/LICENSE b/vendor/github.com/bits-and-blooms/bitset/LICENSE similarity index 100% rename from vendor/github.com/willf/bitset/LICENSE rename to vendor/github.com/bits-and-blooms/bitset/LICENSE diff --git a/vendor/github.com/willf/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md similarity index 84% rename from vendor/github.com/willf/bitset/README.md rename to vendor/github.com/bits-and-blooms/bitset/README.md index 50338e7..97e8307 100644 --- a/vendor/github.com/willf/bitset/README.md +++ b/vendor/github.com/bits-and-blooms/bitset/README.md @@ -2,10 +2,9 @@ *Go language library to map between non-negative integers and boolean values* -[![Test](https://github.com/willf/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest) -[![Master Coverage Status](https://coveralls.io/repos/willf/bitset/badge.svg?branch=master&service=github)](https://coveralls.io/github/willf/bitset?branch=master) +[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest) [![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/willf/bitset?tab=doc)](https://pkg.go.dev/github.com/willf/bitset?tab=doc) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc) ## Description @@ -30,7 +29,7 @@ import ( "fmt" "math/rand" - "github.com/willf/bitset" + "github.com/bits-and-blooms/bitset" ) func main() { @@ -63,7 +62,7 @@ func main() { As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. -Package documentation is at: https://pkg.go.dev/github.com/willf/bitset?tab=doc +Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc ## Memory Usage @@ -78,7 +77,7 @@ It is possible that a later version will match the `math/bits` return signature ## Installation ```bash -go get github.com/willf/bitset +go get github.com/bits-and-blooms/bitset ``` ## Contributing diff --git a/vendor/github.com/willf/bitset/azure-pipelines.yml b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml similarity index 100% rename from vendor/github.com/willf/bitset/azure-pipelines.yml rename to vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml diff --git a/vendor/github.com/willf/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go similarity index 97% rename from vendor/github.com/willf/bitset/bitset.go rename to vendor/github.com/bits-and-blooms/bitset/bitset.go index 21e889d..d688806 100644 --- a/vendor/github.com/willf/bitset/bitset.go +++ b/vendor/github.com/bits-and-blooms/bitset/bitset.go @@ -209,6 +209,27 @@ func (b *BitSet) Flip(i uint) *BitSet { return b } +// FlipRange bit in [start, end). 
+// If end>= Cap(), this function will panic. +// Warning: using a very large value for 'end' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. +func (b *BitSet) FlipRange(start, end uint) *BitSet { + if start >= end { + return b + } + + b.extendSetMaybe(end - 1) + var startWord uint = start >> log2WordSize + var endWord uint = end >> log2WordSize + b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1))) + for i := startWord; i < endWord; i++ { + b.set[i] = ^b.set[i] + } + b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1)) + return b +} + // Shrink shrinks BitSet so that the provided value is the last possible // set value. It clears all bits > the provided index and reduces the size // and length of the set. @@ -519,7 +540,7 @@ func (b *BitSet) Copy(c *BitSet) (count uint) { } // Count (number of set bits). -// Also known as "popcount" or "popularity count". +// Also known as "popcount" or "population count". func (b *BitSet) Count() uint { if b != nil && b.set != nil { return uint(popcntSlice(b.set)) diff --git a/vendor/github.com/bits-and-blooms/bitset/go.mod b/vendor/github.com/bits-and-blooms/bitset/go.mod new file mode 100644 index 0000000..c43e452 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/go.mod @@ -0,0 +1,3 @@ +module github.com/bits-and-blooms/bitset + +go 1.14 diff --git a/vendor/github.com/willf/bitset/go.sum b/vendor/github.com/bits-and-blooms/bitset/go.sum similarity index 100% rename from vendor/github.com/willf/bitset/go.sum rename to vendor/github.com/bits-and-blooms/bitset/go.sum diff --git a/vendor/github.com/willf/bitset/popcnt.go b/vendor/github.com/bits-and-blooms/bitset/popcnt.go similarity index 100% rename from vendor/github.com/willf/bitset/popcnt.go rename to vendor/github.com/bits-and-blooms/bitset/popcnt.go diff --git a/vendor/github.com/willf/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go similarity index 100% rename from vendor/github.com/willf/bitset/popcnt_19.go rename to vendor/github.com/bits-and-blooms/bitset/popcnt_19.go diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go similarity index 100% rename from vendor/github.com/willf/bitset/popcnt_amd64.go rename to vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.s b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s similarity index 100% rename from vendor/github.com/willf/bitset/popcnt_amd64.s rename to vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s diff --git a/vendor/github.com/willf/bitset/popcnt_generic.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go similarity index 100% rename from vendor/github.com/willf/bitset/popcnt_generic.go rename to vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go diff --git a/vendor/github.com/willf/bitset/trailing_zeros_18.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go similarity index 100% rename from vendor/github.com/willf/bitset/trailing_zeros_18.go rename to vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go diff --git a/vendor/github.com/willf/bitset/trailing_zeros_19.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go similarity index 100% rename from vendor/github.com/willf/bitset/trailing_zeros_19.go rename to vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go diff --git 
a/vendor/github.com/blevesearch/bleve/v2/.gitignore b/vendor/github.com/blevesearch/bleve/v2/.gitignore index ab7a1e2..7512de7 100644 --- a/vendor/github.com/blevesearch/bleve/v2/.gitignore +++ b/vendor/github.com/blevesearch/bleve/v2/.gitignore @@ -17,3 +17,4 @@ vendor/** /search/query/y.output *.test tags +go.sum diff --git a/vendor/github.com/blevesearch/bleve/v2/README.md b/vendor/github.com/blevesearch/bleve/v2/README.md index 30b1ecb..e10454d 100644 --- a/vendor/github.com/blevesearch/bleve/v2/README.md +++ b/vendor/github.com/blevesearch/bleve/v2/README.md @@ -16,22 +16,26 @@ modern text indexing in go - [blevesearch.com](http://www.blevesearch.com/) * Index any go data structure (including JSON) * Intelligent defaults backed up by powerful configuration * Supported field types: - * Text, Numeric, Date + * Text, Numeric, Datetime, Boolean * Supported query types: - * Term, Phrase, Match, Match Phrase, Prefix - * Conjunction, Disjunction, Boolean - * Numeric Range, Date Range - * Simple query [syntax](http://www.blevesearch.com/docs/Query-String-Query/) for human entry -* tf-idf Scoring + * Term, Phrase, Match, Match Phrase, Prefix, Fuzzy + * Conjunction, Disjunction, Boolean (must/should/must_not) + * Term Range, Numeric Range, Date Range + * [Geo Spatial](https://github.com/blevesearch/bleve/blob/master/geo/README.md) + * Simple [query string syntax](http://www.blevesearch.com/docs/Query-String-Query/) for human entry +* [tf-idf](https://en.wikipedia.org/wiki/Tf-idf) Scoring +* Boosting * Search result match highlighting -* Supports Aggregating Facets: +* Aggregations/faceting support: * Terms Facet * Numeric Range Facet * Date Range Facet -## Discussion +## Discussion/Issues -Discuss usage and development of bleve in the [google group](https://groups.google.com/forum/#!forum/bleve). +Discuss usage/development of bleve and/or report issues here: +* [Github issues](https://github.com/blevesearch/bleve/issues) +* [Google group](https://groups.google.com/forum/#!forum/bleve) ## Indexing @@ -63,6 +67,46 @@ searchRequest := bleve.NewSearchRequest(query) searchResult, _ := index.Search(searchRequest) ``` +## Command Line Interface + +To install the CLI for the latest release of bleve, run: + +```bash +$ go install github.com/blevesearch/bleve/v2/cmd/bleve@latest +``` + +``` +$ bleve --help +Bleve is a command-line tool to interact with a bleve index. + +Usage: + bleve [command] + +Available Commands: + bulk bulk loads from newline delimited JSON files + check checks the contents of the index + count counts the number documents in the index + create creates a new index + dictionary prints the term dictionary for the specified field in the index + dump dumps the contents of the index + fields lists the fields in this index + help Help about any command + index adds the files to the index + mapping prints the mapping used for this index + query queries the index + registry registry lists the bleve components compiled into this executable + scorch command-line tool to interact with a scorch index + +Flags: + -h, --help help for bleve + +Use "bleve [command] --help" for more information about a command. 
+``` + +## Text Analysis Wizard + +[bleveanalysis.couchbase.com](https://bleveanalysis.couchbase.com) + ## License Apache License Version 2.0 diff --git a/vendor/github.com/blevesearch/bleve/v2/SECURITY.md b/vendor/github.com/blevesearch/bleve/v2/SECURITY.md new file mode 100644 index 0000000..51c6b6b --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/v2/SECURITY.md @@ -0,0 +1,15 @@ +# Security Policy + +## Supported Versions + +We support the latest release (for example, bleve v2.3.x). + +## Reporting a Vulnerability + +All security issues for this project should be reported by email to security@couchbase.com and fts-team@couchbase.com. +This mail will be delivered to the owners of this project. + +- To ensure your report is NOT marked as spam, please include the word "security/vulnerability" along with the project name (blevesearch/bleve) in the subject of the email. +- Please be as descriptive as possible while explaining the issue, and a testcase highlighting the issue is always welcome. + +Your email will be acknowledged at the soonest possible. diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/custom/custom.go b/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/custom/custom.go index 70e7a26..5e28c95 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/custom/custom.go +++ b/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/custom/custom.go @@ -23,7 +23,7 @@ import ( const Name = "custom" -func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) { +func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.Analyzer, error) { var err error var charFilters []analysis.CharFilter @@ -88,7 +88,7 @@ func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) ( } } - rv := analysis.Analyzer{ + rv := analysis.DefaultAnalyzer{ Tokenizer: tokenizer, } if charFilters != nil { diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/keyword/keyword.go b/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/keyword/keyword.go index 473c294..6bb56d6 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/keyword/keyword.go +++ b/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/keyword/keyword.go @@ -22,12 +22,12 @@ import ( const Name = "keyword" -func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) { +func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.Analyzer, error) { keywordTokenizer, err := cache.TokenizerNamed(single.Name) if err != nil { return nil, err } - rv := analysis.Analyzer{ + rv := analysis.DefaultAnalyzer{ Tokenizer: keywordTokenizer, } return &rv, nil diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/simple/simple.go b/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/simple/simple.go index 46a715c..6954e5d 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/simple/simple.go +++ b/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/simple/simple.go @@ -23,7 +23,7 @@ import ( const Name = "simple" -func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) { +func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.Analyzer, error) { tokenizer, err := cache.TokenizerNamed(letter.Name) if err != nil { return nil, err @@ -32,7 +32,7 @@ func AnalyzerConstructor(config 
map[string]interface{}, cache *registry.Cache) ( if err != nil { return nil, err } - rv := analysis.Analyzer{ + rv := analysis.DefaultAnalyzer{ Tokenizer: tokenizer, TokenFilters: []analysis.TokenFilter{ toLowerFilter, diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/standard/standard.go b/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/standard/standard.go index 80a481b..96387bd 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/standard/standard.go +++ b/vendor/github.com/blevesearch/bleve/v2/analysis/analyzer/standard/standard.go @@ -24,7 +24,7 @@ import ( const Name = "standard" -func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) { +func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.Analyzer, error) { tokenizer, err := cache.TokenizerNamed(unicode.Name) if err != nil { return nil, err @@ -37,7 +37,7 @@ func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) ( if err != nil { return nil, err } - rv := analysis.Analyzer{ + rv := analysis.DefaultAnalyzer{ Tokenizer: tokenizer, TokenFilters: []analysis.TokenFilter{ toLowerFilter, diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/analyzer_en.go b/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/analyzer_en.go index 7a4ae58..44a8d4c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/analyzer_en.go +++ b/vendor/github.com/blevesearch/bleve/v2/analysis/lang/en/analyzer_en.go @@ -32,7 +32,7 @@ import ( const AnalyzerName = "en" -func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) { +func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.Analyzer, error) { tokenizer, err := cache.TokenizerNamed(unicode.Name) if err != nil { return nil, err @@ -53,7 +53,7 @@ func AnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) ( if err != nil { return nil, err } - rv := analysis.Analyzer{ + rv := analysis.DefaultAnalyzer{ Tokenizer: tokenizer, TokenFilters: []analysis.TokenFilter{ possEnFilter, diff --git a/vendor/github.com/blevesearch/bleve/v2/analysis/type.go b/vendor/github.com/blevesearch/bleve/v2/analysis/type.go index 589cc1c..9e7bfa1 100644 --- a/vendor/github.com/blevesearch/bleve/v2/analysis/type.go +++ b/vendor/github.com/blevesearch/bleve/v2/analysis/type.go @@ -34,6 +34,7 @@ const ( Single Double Boolean + IP ) // Token represents one occurrence of a term at a particular location in a @@ -71,13 +72,17 @@ type TokenFilter interface { Filter(TokenStream) TokenStream } -type Analyzer struct { +type Analyzer interface { + Analyze([]byte) TokenStream +} + +type DefaultAnalyzer struct { CharFilters []CharFilter Tokenizer Tokenizer TokenFilters []TokenFilter } -func (a *Analyzer) Analyze(input []byte) TokenStream { +func (a *DefaultAnalyzer) Analyze(input []byte) TokenStream { if a.CharFilters != nil { for _, cf := range a.CharFilters { input = cf.Filter(input) diff --git a/vendor/github.com/blevesearch/bleve/v2/config_app.go b/vendor/github.com/blevesearch/bleve/v2/config_app.go index 112d0b6..60b1db3 100644 --- a/vendor/github.com/blevesearch/bleve/v2/config_app.go +++ b/vendor/github.com/blevesearch/bleve/v2/config_app.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build appengine || appenginevm // +build appengine appenginevm package bleve diff --git a/vendor/github.com/blevesearch/bleve/v2/config_disk.go b/vendor/github.com/blevesearch/bleve/v2/config_disk.go index ccfd6da..a9ab1e4 100644 --- a/vendor/github.com/blevesearch/bleve/v2/config_disk.go +++ b/vendor/github.com/blevesearch/bleve/v2/config_disk.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !appengine && !appenginevm // +build !appengine,!appenginevm package bleve diff --git a/vendor/github.com/blevesearch/bleve/v2/document/document.go b/vendor/github.com/blevesearch/bleve/v2/document/document.go index 1a6050f..54fd6d4 100644 --- a/vendor/github.com/blevesearch/bleve/v2/document/document.go +++ b/vendor/github.com/blevesearch/bleve/v2/document/document.go @@ -30,9 +30,14 @@ func init() { } type Document struct { - id string `json:"id"` - Fields []Field `json:"fields"` - CompositeFields []*CompositeField + id string `json:"id"` + Fields []Field `json:"fields"` + CompositeFields []*CompositeField + StoredFieldsSize uint64 +} + +func (d *Document) StoredFieldsBytes() uint64 { + return d.StoredFieldsSize } func NewDocument(id string) *Document { diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_geopoint.go b/vendor/github.com/blevesearch/bleve/v2/document/field_geopoint.go index 89de4e3..719d18c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/document/field_geopoint.go +++ b/vendor/github.com/blevesearch/bleve/v2/document/field_geopoint.go @@ -42,6 +42,8 @@ type GeoPointField struct { numPlainTextBytes uint64 length int frequencies index.TokenFrequencies + + spatialplugin index.SpatialAnalyzerPlugin } func (n *GeoPointField) Size() int { @@ -75,7 +77,7 @@ func (n *GeoPointField) AnalyzedTokenFrequencies() index.TokenFrequencies { } func (n *GeoPointField) Analyze() { - tokens := make(analysis.TokenStream, 0) + tokens := make(analysis.TokenStream, 0, 8) tokens = append(tokens, &analysis.Token{ Start: 0, End: len(n.value), @@ -84,24 +86,42 @@ func (n *GeoPointField) Analyze() { Type: analysis.Numeric, }) - original, err := n.value.Int64() - if err == nil { + if n.spatialplugin != nil { + lat, _ := n.Lat() + lon, _ := n.Lon() + p := &geo.Point{Lat: lat, Lon: lon} + terms := n.spatialplugin.GetIndexTokens(p) - shift := GeoPrecisionStep - for shift < 64 { - shiftEncoded, err := numeric.NewPrefixCodedInt64(original, shift) - if err != nil { - break - } + for _, term := range terms { token := analysis.Token{ Start: 0, - End: len(shiftEncoded), - Term: shiftEncoded, + End: len(term), + Term: []byte(term), Position: 1, - Type: analysis.Numeric, + Type: analysis.AlphaNumeric, } tokens = append(tokens, &token) - shift += GeoPrecisionStep + } + } else { + original, err := n.value.Int64() + if err == nil { + + shift := GeoPrecisionStep + for shift < 64 { + shiftEncoded, err := numeric.NewPrefixCodedInt64(original, shift) + if err != nil { + break + } + token := analysis.Token{ + Start: 0, + End: len(shiftEncoded), + Term: shiftEncoded, + Position: 1, + Type: analysis.Numeric, + } + tokens = append(tokens, &token) + shift += GeoPrecisionStep + } } } @@ -164,3 +184,10 @@ func NewGeoPointFieldWithIndexingOptions(name string, arrayPositions []uint64, l numPlainTextBytes: uint64(8), } } + +// SetSpatialAnalyzerPlugin implements the +// index.TokenisableSpatialField interface. 
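+// When a spatial plugin is set on the field, Analyze() emits the plugin's
+// s2-based index tokens for the point instead of the legacy prefix-coded
+// numeric terms (see Analyze above).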
+func (n *GeoPointField) SetSpatialAnalyzerPlugin( + plugin index.SpatialAnalyzerPlugin) { + n.spatialplugin = plugin +} diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_geoshape.go b/vendor/github.com/blevesearch/bleve/v2/document/field_geoshape.go new file mode 100644 index 0000000..a20ff18 --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/v2/document/field_geoshape.go @@ -0,0 +1,235 @@ +// Copyright (c) 2022 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package document + +import ( + "fmt" + "reflect" + + "github.com/blevesearch/bleve/v2/analysis" + "github.com/blevesearch/bleve/v2/geo" + "github.com/blevesearch/bleve/v2/size" + index "github.com/blevesearch/bleve_index_api" + "github.com/blevesearch/geo/geojson" +) + +var reflectStaticSizeGeoShapeField int + +func init() { + var f GeoShapeField + reflectStaticSizeGeoShapeField = int(reflect.TypeOf(f).Size()) +} + +const DefaultGeoShapeIndexingOptions = index.IndexField | index.DocValues + +type GeoShapeField struct { + name string + shape index.GeoJSON + arrayPositions []uint64 + options index.FieldIndexingOptions + numPlainTextBytes uint64 + length int + encodedValue []byte + value []byte + + frequencies index.TokenFrequencies +} + +func (n *GeoShapeField) Size() int { + return reflectStaticSizeGeoShapeField + size.SizeOfPtr + + len(n.name) + + len(n.arrayPositions)*size.SizeOfUint64 +} + +func (n *GeoShapeField) Name() string { + return n.name +} + +func (n *GeoShapeField) ArrayPositions() []uint64 { + return n.arrayPositions +} + +func (n *GeoShapeField) Options() index.FieldIndexingOptions { + return n.options +} + +func (n *GeoShapeField) EncodedFieldType() byte { + return 's' +} + +func (n *GeoShapeField) AnalyzedLength() int { + return n.length +} + +func (n *GeoShapeField) AnalyzedTokenFrequencies() index.TokenFrequencies { + return n.frequencies +} + +func (n *GeoShapeField) Analyze() { + // compute the bytes representation for the coordinates + tokens := make(analysis.TokenStream, 0) + tokens = append(tokens, &analysis.Token{ + Start: 0, + End: len(n.encodedValue), + Term: n.encodedValue, + Position: 1, + Type: analysis.AlphaNumeric, + }) + + rti := geo.GetSpatialAnalyzerPlugin("s2") + terms := rti.GetIndexTokens(n.shape) + + for _, term := range terms { + token := analysis.Token{ + Start: 0, + End: len(term), + Term: []byte(term), + Position: 1, + Type: analysis.AlphaNumeric, + } + tokens = append(tokens, &token) + } + + n.length = len(tokens) + n.frequencies = analysis.TokenFrequency(tokens, n.arrayPositions, n.options) +} + +func (n *GeoShapeField) Value() []byte { + return n.value +} + +func (n *GeoShapeField) GoString() string { + return fmt.Sprintf("&document.GeoShapeField{Name:%s, Options: %s, Value: %s}", + n.name, n.options, n.value) +} + +func (n *GeoShapeField) NumPlainTextBytes() uint64 { + return n.numPlainTextBytes +} + +func NewGeoShapeField(name string, arrayPositions []uint64, + coordinates [][][][]float64, typ string) *GeoShapeField { + return 
NewGeoShapeFieldWithIndexingOptions(name, arrayPositions,
+       coordinates, typ, DefaultGeoShapeIndexingOptions)
+}
+
+func NewGeoShapeFieldFromBytes(name string, arrayPositions []uint64,
+   value []byte) *GeoShapeField {
+   return &GeoShapeField{
+       name:              name,
+       arrayPositions:    arrayPositions,
+       value:             value,
+       options:           DefaultGeoShapeIndexingOptions,
+       numPlainTextBytes: uint64(len(value)),
+   }
+}
+
+func NewGeoShapeFieldWithIndexingOptions(name string, arrayPositions []uint64,
+   coordinates [][][][]float64, typ string,
+   options index.FieldIndexingOptions) *GeoShapeField {
+   shape, encodedValue, err := geo.NewGeoJsonShape(coordinates, typ)
+   if err != nil {
+       return nil
+   }
+
+   // extra glue bytes to prevent the term splitting logic from interfering
+   // with the custom encoding of the geoshape coordinates inside the docvalues.
+   encodedValue = append(geo.GlueBytes, append(encodedValue, geo.GlueBytes...)...)
+
+   // get the byte value for the geoshape.
+   value, err := shape.Value()
+   if err != nil {
+       return nil
+   }
+
+   options = options | DefaultGeoShapeIndexingOptions
+
+   return &GeoShapeField{
+       shape:             shape,
+       name:              name,
+       arrayPositions:    arrayPositions,
+       options:           options,
+       encodedValue:      encodedValue,
+       value:             value,
+       numPlainTextBytes: uint64(len(value)),
+   }
+}
+
+func NewGeometryCollectionFieldWithIndexingOptions(name string,
+   arrayPositions []uint64, coordinates [][][][][]float64, types []string,
+   options index.FieldIndexingOptions) *GeoShapeField {
+   shape, encodedValue, err := geo.NewGeometryCollection(coordinates, types)
+   if err != nil {
+       return nil
+   }
+
+   // extra glue bytes to prevent the term splitting logic from interfering
+   // with the custom encoding of the geoshape coordinates inside the docvalues.
+   encodedValue = append(geo.GlueBytes, append(encodedValue, geo.GlueBytes...)...)
+
+   // get the byte value for the geometryCollection.
+   value, err := shape.Value()
+   if err != nil {
+       return nil
+   }
+
+   options = options | DefaultGeoShapeIndexingOptions
+
+   return &GeoShapeField{
+       shape:             shape,
+       name:              name,
+       arrayPositions:    arrayPositions,
+       options:           options,
+       encodedValue:      encodedValue,
+       value:             value,
+       numPlainTextBytes: uint64(len(value)),
+   }
+}
+
+func NewGeoCircleFieldWithIndexingOptions(name string, arrayPositions []uint64,
+   centerPoint []float64, radius string,
+   options index.FieldIndexingOptions) *GeoShapeField {
+   shape, encodedValue, err := geo.NewGeoCircleShape(centerPoint, radius)
+   if err != nil {
+       return nil
+   }
+
+   // extra glue bytes to prevent the term splitting logic from interfering
+   // with the custom encoding of the geoshape coordinates inside the docvalues.
+   encodedValue = append(geo.GlueBytes, append(encodedValue, geo.GlueBytes...)...)
+
+   // get the byte value for the circle.
+   value, err := shape.Value()
+   if err != nil {
+       return nil
+   }
+
+   options = options | DefaultGeoShapeIndexingOptions
+
+   return &GeoShapeField{
+       shape:             shape,
+       name:              name,
+       arrayPositions:    arrayPositions,
+       options:           options,
+       encodedValue:      encodedValue,
+       value:             value,
+       numPlainTextBytes: uint64(len(value)),
+   }
+}
+
+// GeoShape is an implementation of the index.GeoShapeField interface.
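+// It parses the field's stored byte value back into an index.GeoJSON shape.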
+func (n *GeoShapeField) GeoShape() (index.GeoJSON, error) { + return geojson.ParseGeoJSONShape(n.value) +} diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_ip.go b/vendor/github.com/blevesearch/bleve/v2/document/field_ip.go new file mode 100644 index 0000000..1e5be50 --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/v2/document/field_ip.go @@ -0,0 +1,132 @@ +// Copyright (c) 2021 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package document + +import ( + "fmt" + "net" + "reflect" + + "github.com/blevesearch/bleve/v2/analysis" + "github.com/blevesearch/bleve/v2/size" + index "github.com/blevesearch/bleve_index_api" +) + +var reflectStaticSizeIPField int + +func init() { + var f IPField + reflectStaticSizeIPField = int(reflect.TypeOf(f).Size()) +} + +const DefaultIPIndexingOptions = index.StoreField | index.IndexField | index.DocValues | index.IncludeTermVectors + +type IPField struct { + name string + arrayPositions []uint64 + options index.FieldIndexingOptions + value net.IP + numPlainTextBytes uint64 + length int + frequencies index.TokenFrequencies +} + +func (b *IPField) Size() int { + return reflectStaticSizeIPField + size.SizeOfPtr + + len(b.name) + + len(b.arrayPositions)*size.SizeOfUint64 + + len(b.value) +} + +func (b *IPField) Name() string { + return b.name +} + +func (b *IPField) ArrayPositions() []uint64 { + return b.arrayPositions +} + +func (b *IPField) Options() index.FieldIndexingOptions { + return b.options +} + +func (n *IPField) EncodedFieldType() byte { + return 'i' +} + +func (n *IPField) AnalyzedLength() int { + return n.length +} + +func (n *IPField) AnalyzedTokenFrequencies() index.TokenFrequencies { + return n.frequencies +} + +func (b *IPField) Analyze() { + + tokens := analysis.TokenStream{ + &analysis.Token{ + Start: 0, + End: len(b.value), + Term: b.value, + Position: 1, + Type: analysis.IP, + }, + } + b.length = 1 + b.frequencies = analysis.TokenFrequency(tokens, b.arrayPositions, b.options) +} + +func (b *IPField) Value() []byte { + return b.value +} + +func (b *IPField) IP() (net.IP, error) { + return net.IP(b.value), nil +} + +func (b *IPField) GoString() string { + return fmt.Sprintf("&document.IPField{Name:%s, Options: %s, Value: %s}", b.name, b.options, net.IP(b.value)) +} + +func (b *IPField) NumPlainTextBytes() uint64 { + return b.numPlainTextBytes +} + +func NewIPFieldFromBytes(name string, arrayPositions []uint64, value []byte) *IPField { + return &IPField{ + name: name, + arrayPositions: arrayPositions, + value: value, + options: DefaultNumericIndexingOptions, + numPlainTextBytes: uint64(len(value)), + } +} + +func NewIPField(name string, arrayPositions []uint64, v net.IP) *IPField { + return NewIPFieldWithIndexingOptions(name, arrayPositions, v, DefaultIPIndexingOptions) +} + +func NewIPFieldWithIndexingOptions(name string, arrayPositions []uint64, b net.IP, options index.FieldIndexingOptions) *IPField { + v := b.To16() + + return &IPField{ + name: name, + arrayPositions: arrayPositions, + 
value:             v,
+       options:           options,
+       numPlainTextBytes: net.IPv6len,
+   }
+}
diff --git a/vendor/github.com/blevesearch/bleve/v2/document/field_text.go b/vendor/github.com/blevesearch/bleve/v2/document/field_text.go
index 924de53..fddc59d 100644
--- a/vendor/github.com/blevesearch/bleve/v2/document/field_text.go
+++ b/vendor/github.com/blevesearch/bleve/v2/document/field_text.go
@@ -36,7 +36,7 @@ type TextField struct {
    name              string
    arrayPositions    []uint64
    options           index.FieldIndexingOptions
-   analyzer          *analysis.Analyzer
+   analyzer          analysis.Analyzer
    value             []byte
    numPlainTextBytes uint64
    length            int
@@ -100,7 +100,7 @@ func (t *TextField) Analyze() {
    t.frequencies = analysis.TokenFrequency(tokens, t.arrayPositions, t.options)
 }
 
-func (t *TextField) Analyzer() *analysis.Analyzer {
+func (t *TextField) Analyzer() analysis.Analyzer {
    return t.analyzer
 }
 
@@ -134,7 +134,7 @@ func NewTextFieldWithIndexingOptions(name string, arrayPositions []uint64, value
    }
 }
 
-func NewTextFieldWithAnalyzer(name string, arrayPositions []uint64, value []byte, analyzer *analysis.Analyzer) *TextField {
+func NewTextFieldWithAnalyzer(name string, arrayPositions []uint64, value []byte, analyzer analysis.Analyzer) *TextField {
    return &TextField{
        name:           name,
        arrayPositions: arrayPositions,
@@ -145,7 +145,7 @@ func NewTextFieldWithAnalyzer(name string, arrayPositions []uint64, value []byte
    }
 }
 
-func NewTextFieldCustom(name string, arrayPositions []uint64, value []byte, options index.FieldIndexingOptions, analyzer *analysis.Analyzer) *TextField {
+func NewTextFieldCustom(name string, arrayPositions []uint64, value []byte, options index.FieldIndexingOptions, analyzer analysis.Analyzer) *TextField {
    return &TextField{
        name:           name,
        arrayPositions: arrayPositions,
diff --git a/vendor/github.com/blevesearch/bleve/v2/geo/README.md b/vendor/github.com/blevesearch/bleve/v2/geo/README.md
index 43bcd98..6112ff5 100644
--- a/vendor/github.com/blevesearch/bleve/v2/geo/README.md
+++ b/vendor/github.com/blevesearch/bleve/v2/geo/README.md
@@ -1,5 +1,273 @@
 # geo support in bleve
 
+The latest bleve spatial capabilities are powered by hierarchical spatial tokens generated from s2geometry.
+You can find more details about the [s2geometry basics here](http://s2geometry.io/), and explore the
+extended functionality of our forked golang port of the [s2geometry lib here](https://github.com/blevesearch/geo).
+
+Users can continue to index and query the `geopoint` field type and the existing queries, like
+
+- Point Distance
+- Bounded Rectangle
+- Bounded Polygon
+
+as before.
+
+## New Spatial Field Type - geoshape
+
+We have introduced a field type (`geoshape`) for representing the new spatial types.
+
+Using the new `geoshape` field type, users can unlock the spatial capabilities
+for the [geojson](https://datatracker.ietf.org/doc/html/rfc7946) shapes like
+
+- Point
+- LineString
+- Polygon
+- MultiPoint
+- MultiLineString
+- MultiPolygon
+- GeometryCollection
+
+In addition to these shapes, bleve also supports additional shapes like
+
+- Circle
+- Envelope (Bounding box)
+
+To specify GeoJSON data, use a nested field with:
+
+- a field named `type` that specifies the GeoJSON object type; the type value is case-insensitive.
+- a field named `coordinates` that specifies the object's coordinates.
+
+```
+    "fieldName": {
+        "type": "GeoJSON Type",
+        "coordinates": <coordinates>
+    }
+```
+
+- If specifying latitude and longitude coordinates, list the longitude first and then the latitude.
+- Valid longitude values are between -180 and 180, both inclusive.
+- Valid latitude values are between -90 and 90, both inclusive.
+- Shapes are internally represented as geodesics.
+- The GeoJSON specification strongly suggests splitting geometries so that neither of their parts crosses the antimeridian.
+
+
+Examples of the various geojson shape representations are below.
+
+## Point
+
+The following specifies a [Point](https://tools.ietf.org/html/rfc7946#section-3.1.2) field in a document:
+
+```
+    {
+        "type": "point",
+        "coordinates": [75.05687713623047,22.53539059204079]
+    }
+```
+
+## Linestring
+
+The following specifies a [Linestring](https://tools.ietf.org/html/rfc7946#section-3.1.4) field in a document:
+
+
+```
+{
+    "type": "linestring",
+    "coordinates": [
+        [ 77.01416015625, 23.0797317624497],
+        [ 78.134765625, 20.385825381874263]
+    ]
+}
+```
+
+
+## Polygon
+
+The following specifies a [Polygon](https://tools.ietf.org/html/rfc7946#section-3.1.6) field in a document:
+
+```
+{
+    "type": "polygon",
+    "coordinates": [ [ [ 85.605, 57.207],
+                       [ 86.396, 55.998],
+                       [ 87.033, 56.716],
+                       [ 85.605, 57.207]
+                     ] ]
+}
```
+
+
+The first and last coordinates must match in order to close the polygon,
+and the exterior coordinates of a polygon have to be in counter-clockwise (CCW) order.
+
+
+## MultiPoint
+
+The following specifies a [Multipoint](https://tools.ietf.org/html/rfc7946#section-3.1.3) field in a document:
+
+```
+{
+    "type": "multipoint",
+    "coordinates": [
+        [ -115.8343505859375, 38.45789034424927],
+        [ -115.81237792968749, 38.19502155795575],
+        [ -120.80017089843749, 36.54053616262899],
+        [ -120.67932128906249, 36.33725319397006]
+    ]
+}
+```
+
+## MultiLineString
+
+The following specifies a [MultiLineString](https://tools.ietf.org/html/rfc7946#section-3.1.5) field in a document:
+
+```
+{
+    "type": "multilinestring",
+    "coordinates": [
+        [ [ -118.31726074, 35.250105158],[ -117.509765624, 35.3756141] ],
+        [ [ -118.6962890, 34.624167789],[ -118.317260742, 35.03899204] ],
+        [ [ -117.9492187, 35.146862906], [ -117.6745605, 34.41144164] ]
+    ]
+}
+```
+
+## MultiPolygon
+
+The following specifies a [MultiPolygon](https://tools.ietf.org/html/rfc7946#section-3.1.7) field in a document:
+
+```
+{
+    "type": "multipolygon",
+    "coordinates": [
+        [ [ [ -73.958, 40.8003 ], [ -73.9498, 40.7968 ],
+            [ -73.9737, 40.7648 ], [ -73.9814, 40.7681 ],
+            [ -73.958, 40.8003 ] ] ],
+
+
+        [ [ [ -73.958, 40.8003 ], [ -73.9498, 40.7968 ],
+            [ -73.9737, 40.7648 ], [ -73.958, 40.8003 ] ] ]
+    ]
+}
+```
+
+
+## GeometryCollection
+
+The following specifies a [GeometryCollection](https://tools.ietf.org/html/rfc7946#section-3.1.8) field in a document:
+
+```
+{
+    "type": "geometrycollection",
+    "geometries": [
+        {
+            "type": "multipoint",
+            "coordinates": [
+                [ -73.9580, 40.8003 ],
+                [ -73.9498, 40.7968 ],
+                [ -73.9737, 40.7648 ],
+                [ -73.9814, 40.7681 ]
+            ]
+        },
+        {
+            "type": "multilinestring",
+            "coordinates": [
+                [ [ -73.96943, 40.78519 ], [ -73.96082, 40.78095 ] ],
+                [ [ -73.96415, 40.79229 ], [ -73.95544, 40.78854 ] ],
+                [ [ -73.97162, 40.78205 ], [ -73.96374, 40.77715 ] ],
+                [ [ -73.97880, 40.77247 ], [ -73.97036, 40.76811 ] ]
+            ]
+        },
+        {
+            "type" : "polygon",
+            "coordinates" : [
+                [ [ 0 , 0 ] , [ 3 , 6 ] , [ 6 , 1 ] , [ 0 , 0 ] ],
+                [ [ 2 , 2 ] , [ 3 , 3 ] , [ 4 , 2 ] , [ 2 , 2 ] ]
+            ]
+        }
+    ]
+}
+```
+
+
+## Circle
+
+If the user wishes to cover a circular region over the earth's surface, they could use this shape.
+A sample circular shape is below.
+
+```
+{
+    "type": "circle",
+    "coordinates": [75.05687713623047,22.53539059204079],
+    "radius": "1000m"
+}
+```
+
+
+A circle is specified by its center point coordinates along with a radius.
+Example formats supported for the radius are:
+"5in", "5inch", "7yd", "7yards", "9ft", "9feet", "11km", "11kilometers", "3nm",
+"3nauticalmiles", "13mm", "13millimeters", "15cm", "15centimeters", "17mi", "17miles", "19m" or "19meters".
+
+If the unit cannot be determined, the entire string is parsed and the unit of meters is assumed.
+
+
+## Envelope
+
+The envelope type consists of coordinates for the upper-left and lower-right points of the shape,
+representing a bounding rectangle in the format [[minLon, maxLat], [maxLon, minLat]].
+
+```
+{
+    "type": "envelope",
+    "coordinates": [
+        [72.83, 18.979],
+        [78.508,17.4555]
+    ]
+}
+```
+
+
+## GeoShape Query
+
+The geoshape query supports three types/filters of spatial querying capability
+across the heterogeneous types of documents indexed.
+
+### Query Structure:
+
+```
+{
+    "query": {
+        "geometry": {
+            "shape": {
+                "type": "<shapeType>",
+                "coordinates": [[[ ]]]
+            },
+            "relation": "<filterName>"
+        }
+    }
+}
+```
+
+
+*shapeType* => can be any of the aforementioned types like Point, LineString, Polygon, MultiPoint,
+GeometryCollection, MultiLineString, MultiPolygon, Circle and Envelope.
+
+*filterName* => can be any of the 3 filters: *intersects*, *contains* and *within*.
+
+### Relation
+
+| FilterName   | Description                                                             |
+| :-----------:| :---------------------------------------------------------------------: |
+| `intersects` | Return all documents whose shape field intersects the query geometry.   |
+| `contains`   | Return all documents whose shape field contains the query geometry.     |
+| `within`     | Return all documents whose shape field is within the query geometry.    |
+
+------------------------------------------------------------------------------------------------------------------------
+
+
+
+### Older Implementation
+
 First, all of this geo code is a Go adaptation of the [Lucene 5.3.2 sandbox geo support](https://lucene.apache.org/core/5_3_2/sandbox/org/apache/lucene/util/package-summary.html).
 
 ## Notes
diff --git a/vendor/github.com/blevesearch/bleve/v2/geo/geo_s2plugin_impl.go b/vendor/github.com/blevesearch/bleve/v2/geo/geo_s2plugin_impl.go
new file mode 100644
index 0000000..f743d87
--- /dev/null
+++ b/vendor/github.com/blevesearch/bleve/v2/geo/geo_s2plugin_impl.go
@@ -0,0 +1,450 @@
+// Copyright (c) 2022 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package geo
+
+import (
+   "encoding/json"
+   "sync"
+
+   index "github.com/blevesearch/bleve_index_api"
+
+   "github.com/blevesearch/geo/geojson"
+   "github.com/blevesearch/geo/s2"
+)
+
+const (
+   PointType              = "point"
+   MultiPointType         = "multipoint"
+   LineStringType         = "linestring"
+   MultiLineStringType    = "multilinestring"
+   PolygonType            = "polygon"
+   MultiPolygonType       = "multipolygon"
+   GeometryCollectionType = "geometrycollection"
+   CircleType             = "circle"
+   EnvelopeType           = "envelope"
+)
+
+// spatialPluginsMap is the spatial plugin cache.
+var (
+   spatialPluginsMap = make(map[string]index.SpatialAnalyzerPlugin)
+   pluginsMapLock    = sync.RWMutex{}
+)
+
+func init() {
+   registerS2RegionTermIndexer()
+}
+
+func registerS2RegionTermIndexer() {
+   spatialPlugin := S2SpatialAnalyzerPlugin{
+       s2Indexer:                    s2.NewRegionTermIndexerWithOptions(initS2IndexerOptions()),
+       s2Searcher:                   s2.NewRegionTermIndexerWithOptions(initS2SearcherOptions()),
+       s2GeoPointsRegionTermIndexer: s2.NewRegionTermIndexerWithOptions(initS2OptionsForGeoPoints()),
+   }
+
+   RegisterSpatialAnalyzerPlugin(&spatialPlugin)
+}
+
+// RegisterSpatialAnalyzerPlugin registers the given plugin implementation.
+func RegisterSpatialAnalyzerPlugin(plugin index.SpatialAnalyzerPlugin) {
+   pluginsMapLock.Lock()
+   spatialPluginsMap[plugin.Type()] = plugin
+   pluginsMapLock.Unlock()
+}
+
+// GetSpatialAnalyzerPlugin retrieves the plugin implementation
+// registered for the given type.
+func GetSpatialAnalyzerPlugin(typ string) index.SpatialAnalyzerPlugin {
+   pluginsMapLock.RLock()
+   rv := spatialPluginsMap[typ]
+   pluginsMapLock.RUnlock()
+   return rv
+}
+
+// initS2IndexerOptions returns the options for s2's region
+// term indexer for the index time tokens of geojson shapes.
+func initS2IndexerOptions() s2.Options {
+   options := s2.Options{}
+   // maxLevel controls the maximum size of the
+   // S2Cells used to approximate regions.
+   options.SetMaxLevel(16)
+
+   // minLevel controls the minimum size of the
+   // S2Cells used to approximate regions.
+   options.SetMinLevel(2)
+
+   // a levelMod value greater than 1 increases the effective branching
+   // factor of the S2Cell hierarchy by skipping some levels.
+   options.SetLevelMod(1)
+
+   // maxCells controls the maximum number of cells
+   // when approximating each s2 region.
+   options.SetMaxCells(20)
+
+   return options
+}
+
+// initS2SearcherOptions returns the options for s2's region
+// term indexer for the query time tokens of geojson shapes.
+func initS2SearcherOptions() s2.Options {
+   options := s2.Options{}
+   // maxLevel controls the maximum size of the
+   // S2Cells used to approximate regions.
+   options.SetMaxLevel(16)
+
+   // minLevel controls the minimum size of the
+   // S2Cells used to approximate regions.
+   options.SetMinLevel(2)
+
+   // a levelMod value greater than 1 increases the effective branching
+   // factor of the S2Cell hierarchy by skipping some levels.
+   options.SetLevelMod(1)
+
+   // maxCells controls the maximum number of cells
+   // when approximating each s2 region.
+   options.SetMaxCells(8)
+
+   return options
+}
+
+// initS2OptionsForGeoPoints returns the options for
+// s2's region term indexer for the original geopoints.
+func initS2OptionsForGeoPoints() s2.Options {
+   options := s2.Options{}
+   // maxLevel controls the maximum size of the
+   // S2Cells used to approximate regions.
+   options.SetMaxLevel(16)
+
+   // minLevel controls the minimum size of the
+   // S2Cells used to approximate regions.
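+   // (level 4 is a finer floor than the level 2 minimum used by the
+   // geojson shape indexer options above.)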
+ options.SetMinLevel(4) + + // levelMod value greater than 1 increases the effective branching + // factor of the S2Cell hierarchy by skipping some levels. + options.SetLevelMod(2) + + // maxCells controls the maximum number of cells + // when approximating each s2 region. + options.SetMaxCells(8) + + // explicit for geo points. + options.SetPointsOnly(true) + + return options +} + +// S2SpatialAnalyzerPlugin is an implementation of +// the index.SpatialAnalyzerPlugin interface. +type S2SpatialAnalyzerPlugin struct { + s2Indexer *s2.RegionTermIndexer + s2Searcher *s2.RegionTermIndexer + s2GeoPointsRegionTermIndexer *s2.RegionTermIndexer +} + +func (s *S2SpatialAnalyzerPlugin) Type() string { + return "s2" +} + +func (s *S2SpatialAnalyzerPlugin) GetIndexTokens(queryShape index.GeoJSON) []string { + var rv []string + shapes := []index.GeoJSON{queryShape} + if gc, ok := queryShape.(*geojson.GeometryCollection); ok { + shapes = gc.Shapes + } + + for _, shape := range shapes { + if s2t, ok := shape.(s2Tokenizable); ok { + rv = append(rv, s2t.IndexTokens(s.s2Indexer)...) + } else if s2t, ok := shape.(s2TokenizableEx); ok { + rv = append(rv, s2t.IndexTokens(s)...) + } + } + + return geojson.DeduplicateTerms(rv) +} + +func (s *S2SpatialAnalyzerPlugin) GetQueryTokens(queryShape index.GeoJSON) []string { + var rv []string + shapes := []index.GeoJSON{queryShape} + if gc, ok := queryShape.(*geojson.GeometryCollection); ok { + shapes = gc.Shapes + } + + for _, shape := range shapes { + if s2t, ok := shape.(s2Tokenizable); ok { + rv = append(rv, s2t.QueryTokens(s.s2Searcher)...) + } else if s2t, ok := shape.(s2TokenizableEx); ok { + rv = append(rv, s2t.QueryTokens(s)...) + } + } + + return geojson.DeduplicateTerms(rv) +} + +// ------------------------------------------------------------------------ +// s2Tokenizable is an optional interface for shapes that support +// the generation of s2 based tokens that can be used for both +// indexing and querying. + +type s2Tokenizable interface { + // IndexTokens returns the tokens for indexing. + IndexTokens(*s2.RegionTermIndexer) []string + + // QueryTokens returns the tokens for searching. + QueryTokens(*s2.RegionTermIndexer) []string +} + +// ------------------------------------------------------------------------ +// s2TokenizableEx is an optional interface for shapes that support +// the generation of s2 based tokens that can be used for both +// indexing and querying. This is intended for the older geopoint +// indexing and querying. +type s2TokenizableEx interface { + // IndexTokens returns the tokens for indexing. + IndexTokens(*S2SpatialAnalyzerPlugin) []string + + // QueryTokens returns the tokens for searching. 
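+   // (the legacy geopoint query shapes below return these terms from the
+   // plugin's geopoint-specific region term indexer.)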
+ QueryTokens(*S2SpatialAnalyzerPlugin) []string +} + +//---------------------------------------------------------------------------------- + +func (p *Point) Type() string { + return PointType +} + +func (p *Point) Value() ([]byte, error) { + return json.Marshal(p) +} + +func (p *Point) Intersects(s index.GeoJSON) (bool, error) { + // placeholder implementation + return false, nil +} + +func (p *Point) Contains(s index.GeoJSON) (bool, error) { + // placeholder implementation + return false, nil +} + +func (p *Point) IndexTokens(s *S2SpatialAnalyzerPlugin) []string { + return s.s2GeoPointsRegionTermIndexer.GetIndexTermsForPoint(s2.PointFromLatLng( + s2.LatLngFromDegrees(p.Lat, p.Lon)), "") +} + +func (p *Point) QueryTokens(s *S2SpatialAnalyzerPlugin) []string { + return nil +} + +//---------------------------------------------------------------------------------- + +type boundedRectangle struct { + minLat float64 + maxLat float64 + minLon float64 + maxLon float64 +} + +func NewBoundedRectangle(minLat, minLon, maxLat, + maxLon float64) *boundedRectangle { + return &boundedRectangle{minLat: minLat, + maxLat: maxLat, minLon: minLon, maxLon: maxLon} +} + +func (br *boundedRectangle) Type() string { + // placeholder implementation + return "boundedRectangle" +} + +func (br *boundedRectangle) Value() ([]byte, error) { + return json.Marshal(br) +} + +func (p *boundedRectangle) Intersects(s index.GeoJSON) (bool, error) { + // placeholder implementation + return false, nil +} + +func (p *boundedRectangle) Contains(s index.GeoJSON) (bool, error) { + // placeholder implementation + return false, nil +} + +func (br *boundedRectangle) IndexTokens(s *S2SpatialAnalyzerPlugin) []string { + return nil +} + +func (br *boundedRectangle) QueryTokens(s *S2SpatialAnalyzerPlugin) []string { + rect := s2.RectFromDegrees(br.minLat, br.minLon, br.maxLat, br.maxLon) + + // obtain the terms to be searched for the given bounding box. + terms := s.s2GeoPointsRegionTermIndexer.GetQueryTermsForRegion(rect, "") + + return geojson.StripCoveringTerms(terms) +} + +//---------------------------------------------------------------------------------- + +type boundedPolygon struct { + coordinates []Point +} + +func NewBoundedPolygon(coordinates []Point) *boundedPolygon { + return &boundedPolygon{coordinates: coordinates} +} + +func (bp *boundedPolygon) Type() string { + // placeholder implementation + return "boundedPolygon" +} + +func (bp *boundedPolygon) Value() ([]byte, error) { + return json.Marshal(bp) +} + +func (p *boundedPolygon) Intersects(s index.GeoJSON) (bool, error) { + // placeholder implementation + return false, nil +} + +func (p *boundedPolygon) Contains(s index.GeoJSON) (bool, error) { + // placeholder implementation + return false, nil +} + +func (bp *boundedPolygon) IndexTokens(s *S2SpatialAnalyzerPlugin) []string { + return nil +} + +func (bp *boundedPolygon) QueryTokens(s *S2SpatialAnalyzerPlugin) []string { + vertices := make([]s2.Point, len(bp.coordinates)) + for i, point := range bp.coordinates { + vertices[i] = s2.PointFromLatLng( + s2.LatLngFromDegrees(point.Lat, point.Lon)) + } + s2polygon := s2.PolygonFromOrientedLoops([]*s2.Loop{s2.LoopFromPoints(vertices)}) + + // obtain the terms to be searched for the given polygon. 
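+   // CapBound() over-approximates the polygon with a spherical cap, so the
+   // returned terms select a superset of candidate points; exact
+   // point-in-polygon filtering is expected to happen downstream.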
terms := s.s2GeoPointsRegionTermIndexer.GetQueryTermsForRegion(
+       s2polygon.CapBound(), "")
+
+   return geojson.StripCoveringTerms(terms)
+}
+
+//----------------------------------------------------------------------------------
+
+type pointDistance struct {
+   dist      float64
+   centerLat float64
+   centerLon float64
+}
+
+func (p *pointDistance) Type() string {
+   // placeholder implementation
+   return "pointDistance"
+}
+
+func (p *pointDistance) Value() ([]byte, error) {
+   return json.Marshal(p)
+}
+
+func NewPointDistance(centerLat, centerLon,
+   dist float64) *pointDistance {
+   return &pointDistance{centerLat: centerLat,
+       centerLon: centerLon, dist: dist}
+}
+
+func (p *pointDistance) Intersects(s index.GeoJSON) (bool, error) {
+   // placeholder implementation
+   return false, nil
+}
+
+func (p *pointDistance) Contains(s index.GeoJSON) (bool, error) {
+   // placeholder implementation
+   return false, nil
+}
+
+func (pd *pointDistance) IndexTokens(s *S2SpatialAnalyzerPlugin) []string {
+   return nil
+}
+
+func (pd *pointDistance) QueryTokens(s *S2SpatialAnalyzerPlugin) []string {
+   // obtain the covering query region from the given points.
+   queryRegion := s2.CapFromCenterAndRadius(pd.centerLat,
+       pd.centerLon, pd.dist)
+
+   // obtain the query terms for the query region.
+   terms := s.s2GeoPointsRegionTermIndexer.GetQueryTermsForRegion(queryRegion, "")
+
+   return geojson.StripCoveringTerms(terms)
+}
+
+// ------------------------------------------------------------------------
+
+// NewGeometryCollection instantiates a geometrycollection
+// and prefixes the byte contents with certain glue bytes that
+// can be used later while filtering the doc values.
+func NewGeometryCollection(coordinates [][][][][]float64,
+   typs []string) (index.GeoJSON, []byte, error) {
+
+   return geojson.NewGeometryCollection(coordinates, typs)
+}
+
+// NewGeoCircleShape instantiates a circle shape and
+// prefixes the byte contents with certain glue bytes that
+// can be used later while filtering the doc values.
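+// (the radius string accepts the distance unit formats documented in the
+// geo README, e.g. "1000m".)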
+func NewGeoCircleShape(cp []float64,
+   radius string) (index.GeoJSON, []byte, error) {
+   return geojson.NewGeoCircleShape(cp, radius)
+}
+
+func NewGeoJsonShape(coordinates [][][][]float64, typ string) (
+   index.GeoJSON, []byte, error) {
+   return geojson.NewGeoJsonShape(coordinates, typ)
+}
+
+func NewGeoJsonPoint(points []float64) index.GeoJSON {
+   return geojson.NewGeoJsonPoint(points)
+}
+
+func NewGeoJsonMultiPoint(points [][]float64) index.GeoJSON {
+   return geojson.NewGeoJsonMultiPoint(points)
+}
+
+func NewGeoJsonLinestring(points [][]float64) index.GeoJSON {
+   return geojson.NewGeoJsonLinestring(points)
+}
+
+func NewGeoJsonMultilinestring(points [][][]float64) index.GeoJSON {
+   return geojson.NewGeoJsonMultilinestring(points)
+}
+
+func NewGeoJsonPolygon(points [][][]float64) index.GeoJSON {
+   return geojson.NewGeoJsonPolygon(points)
+}
+
+func NewGeoJsonMultiPolygon(points [][][][]float64) index.GeoJSON {
+   return geojson.NewGeoJsonMultiPolygon(points)
+}
+
+func NewGeoCircle(points []float64, radius string) index.GeoJSON {
+   return geojson.NewGeoCircle(points, radius)
+}
+
+func NewGeoEnvelope(points [][]float64) index.GeoJSON {
+   return geojson.NewGeoEnvelope(points)
+}
+
+func ParseGeoJSONShape(input json.RawMessage) (index.GeoJSON, error) {
+   return geojson.ParseGeoJSONShape(input)
+}
diff --git a/vendor/github.com/blevesearch/bleve/v2/geo/parse.go b/vendor/github.com/blevesearch/bleve/v2/geo/parse.go
index 8286805..cc7bedd 100644
--- a/vendor/github.com/blevesearch/bleve/v2/geo/parse.go
+++ b/vendor/github.com/blevesearch/bleve/v2/geo/parse.go
@@ -179,3 +179,280 @@ type later interface {
 type lnger interface {
    Lng() float64
 }
+
+// GlueBytes is primarily for quicker filtering of docvalues
+// during the filtering phase.
+var GlueBytes = []byte("##")
+
+var GlueBytesOffset = len(GlueBytes)
+
+func extractCoordinates(thing interface{}) []float64 {
+   thingVal := reflect.ValueOf(thing)
+   if !thingVal.IsValid() {
+       return nil
+   }
+
+   if thingVal.Kind() == reflect.Slice {
+       // must be length 2
+       if thingVal.Len() == 2 {
+           var foundLon, foundLat bool
+           var lon, lat float64
+           first := thingVal.Index(0)
+           if first.CanInterface() {
+               firstVal := first.Interface()
+               lon, foundLon = extractNumericVal(firstVal)
+           }
+           second := thingVal.Index(1)
+           if second.CanInterface() {
+               secondVal := second.Interface()
+               lat, foundLat = extractNumericVal(secondVal)
+           }
+
+           if !foundLon || !foundLat {
+               return nil
+           }
+
+           return []float64{lon, lat}
+       }
+   }
+   return nil
+}
+
+func extract2DCoordinates(thing interface{}) [][]float64 {
+   thingVal := reflect.ValueOf(thing)
+   if !thingVal.IsValid() {
+       return nil
+   }
+
+   rv := make([][]float64, 0, 8)
+   if thingVal.Kind() == reflect.Slice {
+       for j := 0; j < thingVal.Len(); j++ {
+           edges := thingVal.Index(j).Interface()
+           if es, ok := edges.([]interface{}); ok {
+               v := extractCoordinates(es)
+               if len(v) == 2 {
+                   rv = append(rv, v)
+               }
+           }
+       }
+
+       return rv
+   }
+
+   return nil
+}
+
+func extract3DCoordinates(thing interface{}) (c [][][]float64) {
+   coords := reflect.ValueOf(thing)
+   for i := 0; i < coords.Len(); i++ {
+       vals := coords.Index(i)
+
+       edges := vals.Interface()
+       if es, ok := edges.([]interface{}); ok {
+           loop := extract2DCoordinates(es)
+           if len(loop) > 0 {
+               c = append(c, loop)
+           }
+       }
+   }
+
+   return c
+}
+
+func extract4DCoordinates(thing interface{}) (rv [][][][]float64) {
+   thingVal := reflect.ValueOf(thing)
+   if !thingVal.IsValid() {
+       return nil
+   }
+
+   if thingVal.Kind() == reflect.Slice {
+       for j := 0; j < thingVal.Len(); j++ {
+           c := extract3DCoordinates(thingVal.Index(j).Interface())
+           rv = append(rv, c)
+       }
+   }
+
+   return rv
+}
+
+func ParseGeoShapeField(thing interface{}) (interface{}, string, error) {
+   thingVal := reflect.ValueOf(thing)
+   if !thingVal.IsValid() {
+       return nil, "", nil
+   }
+
+   var shape string
+   var coordValue interface{}
+
+   if thingVal.Kind() == reflect.Map {
+       iter := thingVal.MapRange()
+       for iter.Next() {
+           if iter.Key().String() == "type" {
+               shape = iter.Value().Interface().(string)
+               continue
+           }
+
+           if iter.Key().String() == "coordinates" {
+               coordValue = iter.Value().Interface()
+           }
+       }
+   }
+
+   return coordValue, strings.ToLower(shape), nil
+}
+
+func extractGeoShape(thing interface{}) ([][][][]float64, string, bool) {
+   coordValue, typ, err := ParseGeoShapeField(thing)
+   if err != nil {
+       return nil, "", false
+   }
+
+   return ExtractGeoShapeCoordinates(coordValue, typ)
+}
+
+// ExtractGeometryCollection takes an interface{} and tries its best to
+// interpret all the member geojson shapes within it.
+func ExtractGeometryCollection(thing interface{}) ([][][][][]float64, []string, bool) {
+   thingVal := reflect.ValueOf(thing)
+   if !thingVal.IsValid() {
+       return nil, nil, false
+   }
+   var rv [][][][][]float64
+   var types []string
+   var f bool
+
+   if thingVal.Kind() == reflect.Map {
+       iter := thingVal.MapRange()
+       for iter.Next() {
+
+           if iter.Key().String() == "type" {
+               continue
+           }
+
+           if iter.Key().String() == "geometries" {
+               collection := iter.Value().Interface()
+               items := reflect.ValueOf(collection)
+
+               for j := 0; j < items.Len(); j++ {
+                   coords, shape, found := extractGeoShape(items.Index(j).Interface())
+                   if found {
+                       f = found
+                       rv = append(rv, coords)
+                       types = append(types, shape)
+                   }
+               }
+           }
+       }
+   }
+
+   return rv, types, f
+}
+
+// ExtractCircle takes an interface{} and tries its best to
+// interpret the center point coordinates and the radius for a
+// given circle shape.
+func ExtractCircle(thing interface{}) ([]float64, string, bool) {
+   thingVal := reflect.ValueOf(thing)
+   if !thingVal.IsValid() {
+       return nil, "", false
+   }
+   var rv []float64
+   var radiusStr string
+
+   if thingVal.Kind() == reflect.Map {
+       iter := thingVal.MapRange()
+       for iter.Next() {
+
+           if iter.Key().String() == "radius" {
+               radiusStr = iter.Value().Interface().(string)
+               continue
+           }
+
+           if iter.Key().String() == "coordinates" {
+               lng, lat, found := ExtractGeoPoint(iter.Value().Interface())
+               if !found {
+                   return nil, radiusStr, false
+               }
+               rv = append(rv, lng)
+               rv = append(rv, lat)
+           }
+       }
+   }
+
+   return rv, radiusStr, true
+}
+
+// ExtractGeoShapeCoordinates takes an interface{} and tries its best to
+// interpret the coordinates for any of the given geoshape types, like
+// point, multipoint, linestring, multilinestring, polygon and multipolygon.
+func ExtractGeoShapeCoordinates(coordValue interface{},
+   typ string) ([][][][]float64, string, bool) {
+   var rv [][][][]float64
+   if typ == PointType {
+       point := extractCoordinates(coordValue)
+
+       // ignore the contents with invalid entry.
+       if len(point) < 2 {
+           return nil, typ, false
+       }
+
+       rv = [][][][]float64{{{point}}}
+       return rv, typ, true
+   }
+
+   if typ == MultiPointType || typ == LineStringType ||
+       typ == EnvelopeType {
+       coords := extract2DCoordinates(coordValue)
+
+       // ignore the contents with invalid entry.
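+       // (envelopes additionally require exactly two points and
+       // linestrings at least two; those checks follow below.)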
+ if len(coords) == 0 { + return nil, typ, false + } + + if typ == EnvelopeType && len(coords) != 2 { + return nil, typ, false + } + + if typ == LineStringType && len(coords) < 2 { + return nil, typ, false + } + + rv = [][][][]float64{{coords}} + return rv, typ, true + } + + if typ == PolygonType || typ == MultiLineStringType { + coords := extract3DCoordinates(coordValue) + + // ignore the contents with invalid entry. + if len(coords) == 0 { + return nil, typ, false + } + + if typ == PolygonType && len(coords[0]) < 3 || + typ == MultiLineStringType && len(coords[0]) < 2 { + return nil, typ, false + } + + rv = [][][][]float64{coords} + return rv, typ, true + } + + if typ == MultiPolygonType { + rv = extract4DCoordinates(coordValue) + + // ignore the contents with invalid entry. + if len(rv) == 0 || len(rv[0]) == 0 { + return nil, typ, false + + } + + if len(rv[0][0]) < 3 { + return nil, typ, false + } + + return rv, typ, true + } + + return rv, typ, false +} diff --git a/vendor/github.com/blevesearch/bleve/v2/go.mod b/vendor/github.com/blevesearch/bleve/v2/go.mod index a1a6b59..8f0366b 100644 --- a/vendor/github.com/blevesearch/bleve/v2/go.mod +++ b/vendor/github.com/blevesearch/bleve/v2/go.mod @@ -1,29 +1,42 @@ module github.com/blevesearch/bleve/v2 -go 1.13 +go 1.18 require ( - github.com/RoaringBitmap/roaring v0.4.23 - github.com/blevesearch/bleve_index_api v1.0.0 + github.com/RoaringBitmap/roaring v0.9.4 + github.com/bits-and-blooms/bitset v1.2.0 + github.com/blevesearch/bleve_index_api v1.0.5 + github.com/blevesearch/geo v0.1.16 + github.com/blevesearch/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/blevesearch/go-porterstemmer v1.0.3 - github.com/blevesearch/scorch_segment_api/v2 v2.0.1 + github.com/blevesearch/goleveldb v1.0.1 + github.com/blevesearch/gtreap v0.1.1 + github.com/blevesearch/scorch_segment_api/v2 v2.1.4 github.com/blevesearch/segment v0.9.0 + github.com/blevesearch/snowball v0.6.1 github.com/blevesearch/snowballstem v0.9.0 github.com/blevesearch/upsidedown_store_api v1.0.1 - github.com/blevesearch/vellum v1.0.3 - github.com/blevesearch/zapx/v11 v11.2.0 - github.com/blevesearch/zapx/v12 v12.2.0 - github.com/blevesearch/zapx/v13 v13.2.0 - github.com/blevesearch/zapx/v14 v14.2.0 - github.com/blevesearch/zapx/v15 v15.2.0 - github.com/couchbase/moss v0.1.0 + github.com/blevesearch/vellum v1.0.9 + github.com/blevesearch/zapx/v11 v11.3.7 + github.com/blevesearch/zapx/v12 v12.3.7 + github.com/blevesearch/zapx/v13 v13.3.7 + github.com/blevesearch/zapx/v14 v14.3.7 + github.com/blevesearch/zapx/v15 v15.3.8 + github.com/couchbase/moss v0.2.0 github.com/golang/protobuf v1.3.2 - github.com/kljensen/snowball v0.6.0 - github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 - github.com/spf13/cobra v0.0.5 - github.com/steveyen/gtreap v0.1.0 - github.com/syndtr/goleveldb v1.0.0 - github.com/willf/bitset v1.1.10 + github.com/spf13/cobra v1.4.0 go.etcd.io/bbolt v1.3.5 - golang.org/x/text v0.3.0 + golang.org/x/text v0.3.7 +) + +require ( + github.com/blevesearch/mmap-go v1.0.4 // indirect + github.com/couchbase/ghistogram v0.1.0 // indirect + github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect + github.com/golang/snappy v0.0.1 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/json-iterator/go v0.0.0-20171115153421-f7279a603ede // indirect + github.com/mschoch/smat v0.2.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect ) diff --git 
a/vendor/github.com/blevesearch/bleve/v2/go.sum b/vendor/github.com/blevesearch/bleve/v2/go.sum index e1e8edd..f4f9602 100644 --- a/vendor/github.com/blevesearch/bleve/v2/go.sum +++ b/vendor/github.com/blevesearch/bleve/v2/go.sum @@ -1,71 +1,68 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= -github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= -github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= +github.com/RoaringBitmap/roaring v0.9.4 h1:ckvZSX5gwCRaJYBNe7syNawCU5oruY9gQmjXlp4riwo= +github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= +github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blevesearch/bleve_index_api v1.0.5 h1:Lc986kpC4Z0/n1g3gg8ul7H+lxgOQPcXb9SxvQGu+tw= +github.com/blevesearch/bleve_index_api v1.0.5/go.mod h1:YXMDwaXFFXwncRS8UobWs7nvo0DmusriM1nztTlj1ms= +github.com/blevesearch/geo v0.1.16 h1:unVaqUmlwprk56596OQRkGjtq1VZ8XFWSARj+h2cIBY= +github.com/blevesearch/geo v0.1.16/go.mod h1:a1OlySNE+oDQ5qY0vJGYNoLIsMpbKbx8dnmuRP8D7H0= +github.com/blevesearch/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:kDy+zgJFJJoJYBvdfBSiZYBbdsUL0XcjHYWezpQBGPA= +github.com/blevesearch/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:9eJDeqxJ3E7WnLebQUlPD7ZjSce7AnDb9vjGmMCbD0A= github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo= github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M= -github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= +github.com/blevesearch/goleveldb v1.0.1 h1:iAtV2Cu5s0GD1lwUiekkFHe2gTMCCNVj2foPclDLIFI= +github.com/blevesearch/goleveldb v1.0.1/go.mod h1:WrU8ltZbIp0wAoig/MHbrPCXSOLpe79nz5lv5nqfYrQ= +github.com/blevesearch/gtreap v0.1.1 h1:2JWigFrzDMR+42WGIN/V2p0cUvn4UP3C4Q5nmaZGW8Y= +github.com/blevesearch/gtreap v0.1.1/go.mod h1:QaQyDRAT51sotthUWAH4Sj08awFSSWzgYICSZ3w0tYk= github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1 h1:fd+hPtZ8GsbqPK1HslGp7Vhoik4arZteA/IsCEgOisw= -github.com/blevesearch/scorch_segment_api/v2 v2.0.1/go.mod h1:lq7yK2jQy1yQjtjTfU931aVqz7pYxEudHaDwOt1tXfU= +github.com/blevesearch/mmap-go v1.0.4 h1:OVhDhT5B/M1HNPpYPBKIEJaD0F3Si+CrEKULGCDPWmc= +github.com/blevesearch/mmap-go v1.0.4/go.mod h1:EWmEAOmdAS9z/pi/+Toxu99DnsbhG1TIxUoRmJw/pSs= +github.com/blevesearch/scorch_segment_api/v2 v2.1.4 h1:LmGmo5twU3gV+natJbKmOktS9eMhokPGKWuR+jX84vk= +github.com/blevesearch/scorch_segment_api/v2 v2.1.4/go.mod h1:PgVnbbg/t1UkgezPDu8EHLi1BHQ17xUwsFdU6NnOYS0= github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac= github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ= +github.com/blevesearch/snowball v0.6.1 h1:cDYjn/NCH+wwt2UdehaLpr2e4BwLIjN4V/TdLsL+B5A= +github.com/blevesearch/snowball v0.6.1/go.mod h1:ZF0IBg5vgpeoUhnMza2v0A/z8m1cWPlwhke08LpNusg= 
github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s= github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= github.com/blevesearch/upsidedown_store_api v1.0.1 h1:1SYRwyoFLwG3sj0ed89RLtM15amfX2pXlYbFOnF8zNU= github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q= -github.com/blevesearch/vellum v1.0.3 h1:U86G41A7CtXNzzpIJHM8lSTUqz1Mp8U870TkcdCzZc8= -github.com/blevesearch/vellum v1.0.3/go.mod h1:2u5ax02KeDuNWu4/C+hVQMD6uLN4txH1JbtpaDNLJRo= -github.com/blevesearch/zapx/v11 v11.2.0 h1:GBkCJYsyj3eIU4+aiLPxoMz1PYvDbQZl/oXHIBZIP60= -github.com/blevesearch/zapx/v11 v11.2.0/go.mod h1:gN/a0alGw1FZt/YGTo1G6Z6XpDkeOfujX5exY9sCQQM= -github.com/blevesearch/zapx/v12 v12.2.0 h1:dyRcSoZVO1jktL4UpGkCEF1AYa3xhKPirh4/N+Va+Ww= -github.com/blevesearch/zapx/v12 v12.2.0/go.mod h1:fdjwvCwWWwJW/EYTYGtAp3gBA0geCYGLcVTtJEZnY6A= -github.com/blevesearch/zapx/v13 v13.2.0 h1:mUqbaqQABp8nBE4t4q2qMyHCCq4sykoV8r7aJk4ih3s= -github.com/blevesearch/zapx/v13 v13.2.0/go.mod h1:o5rAy/lRS5JpAbITdrOHBS/TugWYbkcYZTz6VfEinAQ= -github.com/blevesearch/zapx/v14 v14.2.0 h1:UsfRqvM9RJxKNKrkR1U7aYc1cv9MWx719fsAjbF6joI= -github.com/blevesearch/zapx/v14 v14.2.0/go.mod h1:GNgZusc1p4ot040cBQMRGEZobvwjCquiEKYh1xLFK9g= -github.com/blevesearch/zapx/v15 v15.2.0 h1:ZpibwcrrOaeslkOw3sJ7npP7KDgRHI/DkACjKTqFwyM= -github.com/blevesearch/zapx/v15 v15.2.0/go.mod h1:MmQceLpWfME4n1WrBFIwplhWmaQbQqLQARpaKUEOs/A= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/blevesearch/vellum v1.0.9 h1:PL+NWVk3dDGPCV0hoDu9XLLJgqU4E5s/dOeEJByQ2uQ= +github.com/blevesearch/vellum v1.0.9/go.mod h1:ul1oT0FhSMDIExNjIxHqJoGpVrBpKCdgDQNxfqgJt7k= +github.com/blevesearch/zapx/v11 v11.3.7 h1:Y6yIAF/DVPiqZUA/jNgSLXmqewfzwHzuwfKyfdG+Xaw= +github.com/blevesearch/zapx/v11 v11.3.7/go.mod h1:Xk9Z69AoAWIOvWudNDMlxJDqSYGf90LS0EfnaAIvXCA= +github.com/blevesearch/zapx/v12 v12.3.7 h1:DfQ6rsmZfEK4PzzJJRXjiM6AObG02+HWvprlXQ1Y7eI= +github.com/blevesearch/zapx/v12 v12.3.7/go.mod h1:SgEtYIBGvM0mgIBn2/tQE/5SdrPXaJUaT/kVqpAPxm0= +github.com/blevesearch/zapx/v13 v13.3.7 h1:igIQg5eKmjw168I7av0Vtwedf7kHnQro/M+ubM4d2l8= +github.com/blevesearch/zapx/v13 v13.3.7/go.mod h1:yyrB4kJ0OT75UPZwT/zS+Ru0/jYKorCOOSY5dBzAy+s= +github.com/blevesearch/zapx/v14 v14.3.7 h1:gfe+fbWslDWP/evHLtp/GOvmNM3sw1BbqD7LhycBX20= +github.com/blevesearch/zapx/v14 v14.3.7/go.mod h1:9J/RbOkqZ1KSjmkOes03AkETX7hrXT0sFMpWH4ewC4w= +github.com/blevesearch/zapx/v15 v15.3.8 h1:q4uMngBHzL1IIhRc8AJUEkj6dGOE3u1l3phLu7hq8uk= +github.com/blevesearch/zapx/v15 v15.3.8/go.mod h1:m7Y6m8soYUvS7MjN9eKlz1xrLCcmqfFadmu7GhWIrLY= github.com/couchbase/ghistogram v0.1.0 h1:b95QcQTCzjTUocDXp/uMgSNQi8oj1tGwnJ4bODWZnps= github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= -github.com/couchbase/moss v0.1.0 h1:HCL+xxHUwmOaL44kMM/gU08OW6QGCui1WVFO58bjhNI= -github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/couchbase/moss v0.2.0 h1:VCYrMzFwEryyhRSeI+/b3tRBSeTpi/8gn5Kf6dxqn+o= +github.com/couchbase/moss v0.2.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= +github.com/cpuguy83/go-md2man/v2 
v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo= +github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/kljensen/snowball v0.6.0 h1:6DZLCcZeL0cLfodx+Md4/OLC6b/bfurWUOUGs1ydfOU= -github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/json-iterator/go v0.0.0-20171115153421-f7279a603ede h1:YrgBGwxMRK0Vq0WSCWFaZUnTsrA/PZE/xs1QZh+/edg= +github.com/json-iterator/go v0.0.0-20171115153421-f7279a603ede/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -73,56 +70,38 @@ github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= -github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/steveyen/gtreap v0.1.0 h1:CjhzTa274PyJLJuMZwIzCO1PfC00oRa8d1Kc78bFXJM= -github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/willf/bitset v1.1.10 
h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/blevesearch/bleve/v2/index.go b/vendor/github.com/blevesearch/bleve/v2/index.go index e08271e..e297122 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index.go +++ 
b/vendor/github.com/blevesearch/bleve/v2/index.go @@ -16,6 +16,7 @@ package bleve import ( "context" + "github.com/blevesearch/bleve/v2/index/upsidedown" "github.com/blevesearch/bleve/v2/document" @@ -306,3 +307,15 @@ type Builder interface { func NewBuilder(path string, mapping mapping.IndexMapping, config map[string]interface{}) (Builder, error) { return newBuilder(path, mapping, config) } + +// IndexCopyable is an index which supports an online copy operation +// of the index. +type IndexCopyable interface { + // CopyTo creates a fully functional copy of the index at the + // specified destination directory implementation. + CopyTo(d index.Directory) error +} + +// FileSystemDirectory is the default implementation for the +// index.Directory interface. +type FileSystemDirectory string diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/builder.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/builder.go index 3311bd0..d956752 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/builder.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/builder.go @@ -140,7 +140,7 @@ func (o *Builder) executeBatchLOCKED(batch *index.Batch) (err error) { // insert _id field doc.AddIDField() // perform analysis directly - analyze(doc) + analyze(doc, nil) analysisResults = append(analysisResults, doc) } } @@ -304,7 +304,7 @@ func (o *Builder) Close() error { } // fill the root bolt with this fake index snapshot - _, _, err = prepareBoltSnapshot(is, tx, o.path, o.segPlugin) + _, _, err = prepareBoltSnapshot(is, tx, o.path, o.segPlugin, nil) if err != nil { _ = tx.Rollback() _ = rootBolt.Close() diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/empty.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/empty.go index 4d47555..34619d4 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/empty.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/empty.go @@ -30,4 +30,12 @@ func (e *emptyPostingsIterator) Size() int { return 0 } +func (e *emptyPostingsIterator) BytesRead() uint64 { + return 0 +} + +func (e *emptyPostingsIterator) ResetBytesRead(uint64) {} + +func (e *emptyPostingsIterator) BytesWritten() uint64 { return 0 } + var anEmptyPostingsIterator = &emptyPostingsIterator{} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/introducer.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/introducer.go index 8516d41..123e71d 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/introducer.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/introducer.go @@ -16,6 +16,7 @@ package scorch import ( "fmt" + "path/filepath" "sync/atomic" "github.com/RoaringBitmap/roaring" @@ -46,6 +47,17 @@ type epochWatcher struct { } func (s *Scorch) introducerLoop() { + defer func() { + if r := recover(); r != nil { + s.fireAsyncError(&AsyncPanicError{ + Source: "introducer", + Path: s.path, + }) + } + + s.asyncTasks.Done() + }() + var epochWatchers []*epochWatcher OUTER: for { @@ -88,8 +100,6 @@ OUTER: } epochWatchers = epochWatchersNext } - - s.asyncTasks.Done() } func (s *Scorch) introduceSegment(next *segmentIntroduction) error { @@ -118,6 +128,7 @@ func (s *Scorch) introduceSegment(next *segmentIntroduction) error { // iterate through current segments var running uint64 var docsToPersistCount, memSegments, fileSegments uint64 + var droppedSegmentFiles []string for i := range root.segment { // see if optimistic work included this segment delta, ok := next.obsoletes[root.segment[i].id] @@ -155,6 +166,9 @@ 
func (s *Scorch) introduceSegment(next *segmentIntroduction) error { root.segment[i].segment.AddRef() newSnapshot.offsets = append(newSnapshot.offsets, running) running += newss.segment.Count() + } else if seg, ok := newss.segment.(segment.PersistedSegment); ok { + droppedSegmentFiles = append(droppedSegmentFiles, + filepath.Base(seg.Path())) } if isMemorySegment(root.segment[i]) { @@ -219,6 +233,12 @@ func (s *Scorch) introduceSegment(next *segmentIntroduction) error { _ = rootPrev.DecRef() } + // update the removal eligibility for those segment files + // that are not a part of the latest root. + for _, filename := range droppedSegmentFiles { + s.unmarkIneligibleForRemoval(filename) + } + close(next.applied) return nil @@ -257,6 +277,7 @@ func (s *Scorch) introducePersist(persist *persistIntroduction) { deleted: segmentSnapshot.deleted, cachedDocs: segmentSnapshot.cachedDocs, creator: "introducePersist", + mmaped: 1, } newIndexSnapshot.segment[i] = newSegmentSnapshot delete(persist.persisted, segmentSnapshot.id) @@ -323,6 +344,7 @@ func (s *Scorch) introduceMerge(nextMerge *segmentMerge) { // iterate through current segments newSegmentDeleted := roaring.NewBitmap() var running, docsToPersistCount, memSegments, fileSegments uint64 + var droppedSegmentFiles []string for i := range root.segment { segmentID := root.segment[i].id if segSnapAtMerge, ok := nextMerge.old[segmentID]; ok { @@ -365,8 +387,12 @@ func (s *Scorch) introduceMerge(nextMerge *segmentMerge) { } else { fileSegments++ } + } else if root.segment[i].LiveSize() == 0 { + if seg, ok := root.segment[i].segment.(segment.PersistedSegment); ok { + droppedSegmentFiles = append(droppedSegmentFiles, + filepath.Base(seg.Path())) + } } - } // before the newMerge introduction, need to clean the newly @@ -388,6 +414,7 @@ func (s *Scorch) introduceMerge(nextMerge *segmentMerge) { // deleted by the time we reach here, can skip the introduction. if nextMerge.new != nil && nextMerge.new.Count() > newSegmentDeleted.GetCardinality() { + // put new segment at end newSnapshot.segment = append(newSnapshot.segment, &SegmentSnapshot{ id: nextMerge.id, @@ -395,6 +422,7 @@ func (s *Scorch) introduceMerge(nextMerge *segmentMerge) { deleted: newSegmentDeleted, cachedDocs: &cachedDocs{cache: nil}, creator: "introduceMerge", + mmaped: nextMerge.mmaped, }) newSnapshot.offsets = append(newSnapshot.offsets, running) atomic.AddUint64(&s.stats.TotIntroducedSegmentsMerge, 1) @@ -432,6 +460,12 @@ func (s *Scorch) introduceMerge(nextMerge *segmentMerge) { _ = rootPrev.DecRef() } + // update the removal eligibility for those segment files + // that are not a part of the latest root. 
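// (Editor's note, not part of the vendored patch: unmarkIneligibleForRemoval
// is assumed to drop the base filename from scorch's ineligibleForRemoval
// set under rootLock, roughly:
//
//	func (s *Scorch) unmarkIneligibleForRemoval(filename string) {
//		s.rootLock.Lock()
//		delete(s.ineligibleForRemoval, filename)
//		s.rootLock.Unlock()
//	}
//
// after which the persister's cleanup pass may delete the now-obsolete
// segment file from disk.)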
+ for _, filename := range droppedSegmentFiles { + s.unmarkIneligibleForRemoval(filename) + } + // notify requester that we incorporated this nextMerge.notifyCh <- &mergeTaskIntroStatus{ indexSnapshot: newSnapshot, diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/merge.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/merge.go index 1e269af..c5f0e79 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/merge.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/merge.go @@ -29,12 +29,22 @@ import ( ) func (s *Scorch) mergerLoop() { + defer func() { + if r := recover(); r != nil { + s.fireAsyncError(&AsyncPanicError{ + Source: "merger", + Path: s.path, + }) + } + + s.asyncTasks.Done() + }() + var lastEpochMergePlanned uint64 var ctrlMsg *mergerCtrl mergePlannerOptions, err := s.parseMergePlannerOptions() if err != nil { s.fireAsyncError(fmt.Errorf("mergePlannerOption json parsing err: %v", err)) - s.asyncTasks.Done() return } ctrlMsgDflt := &mergerCtrl{ctx: context.Background(), @@ -130,8 +140,6 @@ OUTER: atomic.AddUint64(&s.stats.TotFileMergeLoopEnd, 1) } - - s.asyncTasks.Done() } type mergerCtrl struct { @@ -209,32 +217,32 @@ func (s *Scorch) parseMergePlannerOptions() (*mergeplan.MergePlanOptions, } type closeChWrapper struct { - ch1 chan struct{} - ctx context.Context - closeCh chan struct{} + ch1 chan struct{} + ctx context.Context + closeCh chan struct{} + cancelCh chan struct{} } func newCloseChWrapper(ch1 chan struct{}, ctx context.Context) *closeChWrapper { - return &closeChWrapper{ch1: ch1, - ctx: ctx, - closeCh: make(chan struct{})} + return &closeChWrapper{ + ch1: ch1, + ctx: ctx, + closeCh: make(chan struct{}), + cancelCh: make(chan struct{}), + } } func (w *closeChWrapper) close() { - select { - case <-w.closeCh: - default: - close(w.closeCh) - } + close(w.closeCh) } func (w *closeChWrapper) listen() { select { case <-w.ch1: - w.close() + close(w.cancelCh) case <-w.ctx.Done(): - w.close() + close(w.cancelCh) case <-w.closeCh: } } @@ -319,8 +327,9 @@ func (s *Scorch) planMergeAtSnapshot(ctx context.Context, fileMergeZapStartTime := time.Now() atomic.AddUint64(&s.stats.TotFileMergeZapBeg, 1) + prevBytesReadTotal := cumulateBytesRead(segmentsToMerge) newDocNums, _, err := s.segPlugin.Merge(segmentsToMerge, docsToDrop, path, - cw.closeCh, s) + cw.cancelCh, s) atomic.AddUint64(&s.stats.TotFileMergeZapEnd, 1) fileMergeZapTime := uint64(time.Since(fileMergeZapStartTime)) @@ -344,6 +353,10 @@ func (s *Scorch) planMergeAtSnapshot(ctx context.Context, atomic.AddUint64(&s.stats.TotFileMergePlanTasksErr, 1) return err } + + totalBytesRead := seg.BytesRead() + prevBytesReadTotal + seg.ResetBytesRead(totalBytesRead) + oldNewDocNums = make(map[uint64][]uint64) for i, segNewDocNums := range newDocNums { oldNewDocNums[task.Segments[i].Id()] = segNewDocNums @@ -358,6 +371,7 @@ func (s *Scorch) planMergeAtSnapshot(ctx context.Context, oldNewDocNums: oldNewDocNums, new: seg, notifyCh: make(chan *mergeTaskIntroStatus), + mmaped: 1, } s.fireEvent(EventKindMergeTaskIntroductionStart, 0) @@ -416,6 +430,15 @@ type segmentMerge struct { oldNewDocNums map[uint64][]uint64 new segment.Segment notifyCh chan *mergeTaskIntroStatus + mmaped uint32 +} + +func cumulateBytesRead(sbs []segment.Segment) uint64 { + var rv uint64 + for _, seg := range sbs { + rv += seg.BytesRead() + } + return rv } // perform a merging of the given SegmentBase instances into a new, diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go 
b/vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go index 36a0379..670f0f8 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/persister.go @@ -19,6 +19,7 @@ import ( "encoding/binary" "encoding/json" "fmt" + "io" "io/ioutil" "log" "math" @@ -82,7 +83,16 @@ type persisterOptions struct { type notificationChan chan struct{} func (s *Scorch) persisterLoop() { - defer s.asyncTasks.Done() + defer func() { + if r := recover(); r != nil { + s.fireAsyncError(&AsyncPanicError{ + Source: "persister", + Path: s.path, + }) + } + + s.asyncTasks.Done() + }() var persistWatchers []*epochWatcher var lastPersistedEpoch, lastMergedEpoch uint64 @@ -93,7 +103,6 @@ func (s *Scorch) persisterLoop() { po, err := s.parsePersisterOptions() if err != nil { s.fireAsyncError(fmt.Errorf("persisterOptions json parsing err: %v", err)) - s.asyncTasks.Done() return } @@ -428,8 +437,59 @@ func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot) ( return true, nil } +func copyToDirectory(srcPath string, d index.Directory) (int64, error) { + if d == nil { + return 0, nil + } + + dest, err := d.GetWriter(filepath.Join("store", filepath.Base(srcPath))) + if err != nil { + return 0, fmt.Errorf("GetWriter err: %v", err) + } + + sourceFileStat, err := os.Stat(srcPath) + if err != nil { + return 0, err + } + + if !sourceFileStat.Mode().IsRegular() { + return 0, fmt.Errorf("%s is not a regular file", srcPath) + } + + source, err := os.Open(srcPath) + if err != nil { + return 0, err + } + defer source.Close() + defer dest.Close() + return io.Copy(dest, source) +} + +func persistToDirectory(seg segment.UnpersistedSegment, d index.Directory, + path string) error { + if d == nil { + return seg.Persist(path) + } + + sg, ok := seg.(io.WriterTo) + if !ok { + return fmt.Errorf("no io.WriterTo segment implementation found") + } + + w, err := d.GetWriter(filepath.Join("store", filepath.Base(path))) + if err != nil { + return err + } + + _, err = sg.WriteTo(w) + w.Close() + + return err +} + func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string, - segPlugin SegmentPlugin) ([]string, map[uint64]string, error) { + segPlugin SegmentPlugin, d index.Directory) ( + []string, map[uint64]string, error) { snapshotsBucket, err := tx.CreateBucketIfNotExists(boltSnapshotsBucket) if err != nil { return nil, nil, err @@ -482,7 +542,11 @@ func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string, switch seg := segmentSnapshot.segment.(type) { case segment.PersistedSegment: segPath := seg.Path() - filename := strings.TrimPrefix(segPath, path+string(os.PathSeparator)) + _, err = copyToDirectory(segPath, d) + if err != nil { + return nil, nil, fmt.Errorf("segment: %s copy err: %v", segPath, err) + } + filename := filepath.Base(segPath) err = snapshotSegmentBucket.Put(boltPathKey, []byte(filename)) if err != nil { return nil, nil, err @@ -491,10 +555,10 @@ func prepareBoltSnapshot(snapshot *IndexSnapshot, tx *bolt.Tx, path string, case segment.UnpersistedSegment: // need to persist this to disk filename := zapFileName(segmentSnapshot.id) - path := path + string(os.PathSeparator) + filename - err = seg.Persist(path) + path := filepath.Join(path, filename) + err := persistToDirectory(seg, d, path) if err != nil { - return nil, nil, fmt.Errorf("error persisting segment: %v", err) + return nil, nil, fmt.Errorf("segment: %s persist err: %v", path, err) } newSegmentPaths[segmentSnapshot.id] = path err = 
snapshotSegmentBucket.Put(boltPathKey, []byte(filename)) @@ -535,7 +599,7 @@ func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) { } }() - filenames, newSegmentPaths, err := prepareBoltSnapshot(snapshot, tx, s.path, s.segPlugin) + filenames, newSegmentPaths, err := prepareBoltSnapshot(snapshot, tx, s.path, s.segPlugin, nil) if err != nil { return err } diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/scorch.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/scorch.go index fa1aaeb..413684c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/scorch.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/scorch.go @@ -73,6 +73,18 @@ type Scorch struct { forceMergeRequestCh chan *mergerCtrl segPlugin SegmentPlugin + + spatialPlugin index.SpatialAnalyzerPlugin +} + +// AsyncPanicError is passed to scorch asyncErrorHandler when panic occurs in scorch background process +type AsyncPanicError struct { + Source string + Path string +} + +func (e *AsyncPanicError) Error() string { + return fmt.Sprintf("%s panic when processing %s", e.Source, e.Path) } type internalStats struct { @@ -112,6 +124,11 @@ func NewScorch(storeName string, } } + typ, ok := config["spatialPlugin"].(string) + if ok { + rv.loadSpatialAnalyzerPlugin(typ) + } + rv.root = &IndexSnapshot{parent: rv, refs: 1, creator: "NewScorch"} ro, ok := config["read_only"].(bool) if ok { @@ -129,6 +146,7 @@ func NewScorch(storeName string, if ok { rv.onAsyncError = RegistryAsyncErrorCallbacks[aecbName] } + return rv, nil } @@ -202,6 +220,15 @@ func (s *Scorch) openBolt() error { var rootBoltOpt = *bolt.DefaultOptions if s.readOnly { rootBoltOpt.ReadOnly = true + rootBoltOpt.OpenFile = func(path string, flag int, mode os.FileMode) (*os.File, error) { + // Bolt appends an O_CREATE flag regardless. 
+ // See - https://github.com/etcd-io/bbolt/blob/v1.3.5/db.go#L210 + // Use os.O_RDONLY only if path exists (#1623) + if _, err := os.Stat(path); os.IsNotExist(err) { + return os.OpenFile(path, flag, mode) + } + return os.OpenFile(path, os.O_RDONLY, mode) + } } else { if s.path != "" { err := os.MkdirAll(s.path, 0700) @@ -265,6 +292,11 @@ func (s *Scorch) openBolt() error { } } + typ, ok := s.config["spatialPlugin"].(string) + if ok { + s.loadSpatialAnalyzerPlugin(typ) + } + return nil } @@ -344,7 +376,7 @@ func (s *Scorch) Batch(batch *index.Batch) (err error) { if doc != nil { // put the work on the queue s.analysisQueue.Queue(func() { - analyze(doc) + analyze(doc, s.setSpatialAnalyzerPlugin) resultChan <- doc }) } @@ -381,6 +413,10 @@ func (s *Scorch) Batch(batch *index.Batch) (err error) { if err != nil { return err } + if segB, ok := newSegment.(segment.DiskStatsReporter); ok { + atomic.AddUint64(&s.stats.TotBytesWrittenAtIndexTime, + segB.BytesWritten()) + } atomic.AddUint64(&s.iStats.newSegBufBytesAdded, bufBytes) } else { atomic.AddUint64(&s.stats.TotBatchesEmpty, 1) @@ -494,6 +530,10 @@ func (s *Scorch) Stats() json.Marshaler { return &s.stats } +func (s *Scorch) BytesReadQueryTime() uint64 { + return s.stats.TotBytesReadAtQueryTime +} + func (s *Scorch) diskFileStats(rootSegmentPaths map[string]struct{}) (uint64, uint64, uint64) { var numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot uint64 @@ -551,7 +591,9 @@ func (s *Scorch) StatsMap() map[string]interface{} { m["index_time"] = m["TotIndexTime"] m["term_searchers_started"] = m["TotTermSearchersStarted"] m["term_searchers_finished"] = m["TotTermSearchersFinished"] + m["num_bytes_read_at_query_time"] = m["TotBytesReadAtQueryTime"] m["num_plain_text_bytes_indexed"] = m["TotIndexedPlainTextBytes"] + m["num_bytes_written_at_index_time"] = m["TotBytesWrittenAtIndexTime"] m["num_items_introduced"] = m["TotIntroducedItems"] m["num_items_persisted"] = m["TotPersistedItems"] m["num_recs_to_persist"] = m["TotItemsToPersist"] @@ -574,12 +616,29 @@ func (s *Scorch) StatsMap() map[string]interface{} { } func (s *Scorch) Analyze(d index.Document) { - analyze(d) + analyze(d, s.setSpatialAnalyzerPlugin) +} + +type customAnalyzerPluginInitFunc func(field index.Field) + +func (s *Scorch) setSpatialAnalyzerPlugin(f index.Field) { + if s.segPlugin != nil { + // check whether the current field is a custom tokenizable + // spatial field then set the spatial analyser plugin for + // overriding the tokenisation during the analysis stage. 
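// (Editor's illustration, not part of the vendored patch: the plugin
// consulted here is installed through the scorch config, per the
// config["spatialPlugin"] lookups added to NewScorch and openBolt above.
// Assuming the "s2" plugin name registered by the geo package, and assuming
// kvConfig entries reach the scorch config the way "read_only" does,
// enabling it might look like:
//
//	idx, err := bleve.NewUsing("example.bleve", indexMapping, scorch.Name,
//		scorch.Name, map[string]interface{}{"spatialPlugin": "s2"})
// )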
+ if sf, ok := f.(index.TokenizableSpatialField); ok { + sf.SetSpatialAnalyzerPlugin(s.spatialPlugin) + } + } } -func analyze(d index.Document) { +func analyze(d index.Document, fn customAnalyzerPluginInitFunc) { d.VisitFields(func(field index.Field) { if field.Options().IsIndexed() { + if fn != nil { + fn(field) + } + field.Analyze() if d.HasComposite() && field.Name() != "_id" { diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/segment_plugin.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/segment_plugin.go index ea40911..a84d2d5 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/segment_plugin.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/segment_plugin.go @@ -16,9 +16,11 @@ package scorch import ( "fmt" + "github.com/RoaringBitmap/roaring" index "github.com/blevesearch/bleve_index_api" + "github.com/blevesearch/bleve/v2/geo" segment "github.com/blevesearch/scorch_segment_api/v2" zapv11 "github.com/blevesearch/zapx/v11" @@ -131,3 +133,11 @@ func (s *Scorch) loadSegmentPlugin(forcedSegmentType string, s.segPlugin = segPlugin return nil } + +func (s *Scorch) loadSpatialAnalyzerPlugin(typ string) error { + s.spatialPlugin = geo.GetSpatialAnalyzerPlugin(typ) + if s.spatialPlugin == nil { + return fmt.Errorf("unsupported spatial plugin type: %s", typ) + } + return nil +} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index.go index ac2f344..59828e8 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index.go @@ -16,8 +16,11 @@ package scorch import ( "container/heap" + "context" "encoding/binary" "fmt" + "os" + "path/filepath" "reflect" "sort" "sync" @@ -29,6 +32,7 @@ import ( segment "github.com/blevesearch/scorch_segment_api/v2" "github.com/blevesearch/vellum" lev "github.com/blevesearch/vellum/levenshtein" + bolt "go.etcd.io/bbolt" ) // re usable, threadsafe levenshtein builders @@ -48,6 +52,15 @@ type asynchSegmentResult struct { var reflectStaticSizeIndexSnapshot int +// DefaultFieldTFRCacheThreshold limits the number of TermFieldReaders(TFR) for +// a field in an index snapshot. Without this limit, when recycling TFRs, it is +// possible that a very large number of TFRs may be added to the recycle +// cache, which could eventually lead to significant memory consumption. +// This threshold can be overwritten by users at the library level by changing the +// exported variable, or at the index level by setting the FieldTFRCacheThreshold +// in the kvConfig. 
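// (Editor's illustration, not part of the vendored patch: two hedged
// sketches of the override points described above. getFieldTFRCacheThreshold
// later in this file type-asserts the kvConfig value to uint64, so the value
// must be passed as an explicit uint64:
//
//	scorch.DefaultFieldTFRCacheThreshold = 20 // library level
//
//	idx, err := bleve.NewUsing("example.bleve", indexMapping, scorch.Name,
//		scorch.Name, map[string]interface{}{
//			"FieldTFRCacheThreshold": uint64(20), // index level
//		})
// )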
+var DefaultFieldTFRCacheThreshold uint64 = 10 + func init() { var is interface{} = IndexSnapshot{} reflectStaticSizeIndexSnapshot = int(reflect.TypeOf(is).Size()) @@ -127,32 +140,36 @@ func (i *IndexSnapshot) updateSize() { } } -func (i *IndexSnapshot) newIndexSnapshotFieldDict(field string, +func (is *IndexSnapshot) newIndexSnapshotFieldDict(field string, makeItr func(i segment.TermDictionary) segment.DictionaryIterator, randomLookup bool) (*IndexSnapshotFieldDict, error) { results := make(chan *asynchSegmentResult) - for index, segment := range i.segment { - go func(index int, segment *SegmentSnapshot) { - dict, err := segment.segment.Dictionary(field) + var totalBytesRead uint64 + for _, s := range is.segment { + go func(s *SegmentSnapshot) { + dict, err := s.segment.Dictionary(field) if err != nil { results <- &asynchSegmentResult{err: err} } else { + if dictStats, ok := dict.(segment.DiskStatsReporter); ok { + atomic.AddUint64(&totalBytesRead, dictStats.BytesRead()) + } if randomLookup { results <- &asynchSegmentResult{dict: dict} } else { results <- &asynchSegmentResult{dictItr: makeItr(dict)} } } - }(index, segment) + }(s) } var err error rv := &IndexSnapshotFieldDict{ - snapshot: i, - cursors: make([]*segmentDictCursor, 0, len(i.segment)), + snapshot: is, + cursors: make([]*segmentDictCursor, 0, len(is.segment)), } - for count := 0; count < len(i.segment); count++ { + for count := 0; count < len(is.segment); count++ { asr := <-results if asr.err != nil && err == nil { err = asr.err @@ -175,6 +192,7 @@ func (i *IndexSnapshot) newIndexSnapshotFieldDict(field string, } } } + rv.bytesRead = totalBytesRead // after ensuring we've read all items on channel if err != nil { return nil, err @@ -188,9 +206,9 @@ func (i *IndexSnapshot) newIndexSnapshotFieldDict(field string, return rv, nil } -func (i *IndexSnapshot) FieldDict(field string) (index.FieldDict, error) { - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { - return i.AutomatonIterator(nil, nil, nil) +func (is *IndexSnapshot) FieldDict(field string) (index.FieldDict, error) { + return is.newIndexSnapshotFieldDict(field, func(is segment.TermDictionary) segment.DictionaryIterator { + return is.AutomatonIterator(nil, nil, nil) }, false) } @@ -215,11 +233,11 @@ func calculateExclusiveEndFromInclusiveEnd(inclusiveEnd []byte) []byte { return rv } -func (i *IndexSnapshot) FieldDictRange(field string, startTerm []byte, +func (is *IndexSnapshot) FieldDictRange(field string, startTerm []byte, endTerm []byte) (index.FieldDict, error) { - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { + return is.newIndexSnapshotFieldDict(field, func(is segment.TermDictionary) segment.DictionaryIterator { endTermExclusive := calculateExclusiveEndFromInclusiveEnd(endTerm) - return i.AutomatonIterator(nil, startTerm, endTermExclusive) + return is.AutomatonIterator(nil, startTerm, endTermExclusive) }, false) } @@ -241,15 +259,15 @@ func calculateExclusiveEndFromPrefix(in []byte) []byte { return nil } -func (i *IndexSnapshot) FieldDictPrefix(field string, +func (is *IndexSnapshot) FieldDictPrefix(field string, termPrefix []byte) (index.FieldDict, error) { termPrefixEnd := calculateExclusiveEndFromPrefix(termPrefix) - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { - return i.AutomatonIterator(nil, termPrefix, termPrefixEnd) + return is.newIndexSnapshotFieldDict(field, func(is segment.TermDictionary) 
segment.DictionaryIterator { + return is.AutomatonIterator(nil, termPrefix, termPrefixEnd) }, false) } -func (i *IndexSnapshot) FieldDictRegexp(field string, +func (is *IndexSnapshot) FieldDictRegexp(field string, termRegex string) (index.FieldDict, error) { // TODO: potential optimization where the literal prefix represents the, // entire regexp, allowing us to use PrefixIterator(prefixTerm)? @@ -259,12 +277,12 @@ func (i *IndexSnapshot) FieldDictRegexp(field string, return nil, err } - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { - return i.AutomatonIterator(a, prefixBeg, prefixEnd) + return is.newIndexSnapshotFieldDict(field, func(is segment.TermDictionary) segment.DictionaryIterator { + return is.AutomatonIterator(a, prefixBeg, prefixEnd) }, false) } -func (i *IndexSnapshot) getLevAutomaton(term string, +func (is *IndexSnapshot) getLevAutomaton(term string, fuzziness uint8) (vellum.Automaton, error) { if fuzziness == 1 { return lb1.BuildDfa(term, fuzziness) @@ -274,9 +292,9 @@ func (i *IndexSnapshot) getLevAutomaton(term string, return nil, fmt.Errorf("fuzziness exceeds the max limit") } -func (i *IndexSnapshot) FieldDictFuzzy(field string, +func (is *IndexSnapshot) FieldDictFuzzy(field string, term string, fuzziness int, prefix string) (index.FieldDict, error) { - a, err := i.getLevAutomaton(term, uint8(fuzziness)) + a, err := is.getLevAutomaton(term, uint8(fuzziness)) if err != nil { return nil, err } @@ -287,18 +305,18 @@ func (i *IndexSnapshot) FieldDictFuzzy(field string, prefixEnd = calculateExclusiveEndFromPrefix(prefixBeg) } - return i.newIndexSnapshotFieldDict(field, func(i segment.TermDictionary) segment.DictionaryIterator { - return i.AutomatonIterator(a, prefixBeg, prefixEnd) + return is.newIndexSnapshotFieldDict(field, func(is segment.TermDictionary) segment.DictionaryIterator { + return is.AutomatonIterator(a, prefixBeg, prefixEnd) }, false) } -func (i *IndexSnapshot) FieldDictContains(field string) (index.FieldDictContains, error) { - return i.newIndexSnapshotFieldDict(field, nil, true) +func (is *IndexSnapshot) FieldDictContains(field string) (index.FieldDictContains, error) { + return is.newIndexSnapshotFieldDict(field, nil, true) } -func (i *IndexSnapshot) DocIDReaderAll() (index.DocIDReader, error) { +func (is *IndexSnapshot) DocIDReaderAll() (index.DocIDReader, error) { results := make(chan *asynchSegmentResult) - for index, segment := range i.segment { + for index, segment := range is.segment { go func(index int, segment *SegmentSnapshot) { results <- &asynchSegmentResult{ index: index, @@ -307,12 +325,12 @@ func (i *IndexSnapshot) DocIDReaderAll() (index.DocIDReader, error) { }(index, segment) } - return i.newDocIDReader(results) + return is.newDocIDReader(results) } -func (i *IndexSnapshot) DocIDReaderOnly(ids []string) (index.DocIDReader, error) { +func (is *IndexSnapshot) DocIDReaderOnly(ids []string) (index.DocIDReader, error) { results := make(chan *asynchSegmentResult) - for index, segment := range i.segment { + for index, segment := range is.segment { go func(index int, segment *SegmentSnapshot) { docs, err := segment.DocNumbers(ids) if err != nil { @@ -326,16 +344,16 @@ func (i *IndexSnapshot) DocIDReaderOnly(ids []string) (index.DocIDReader, error) }(index, segment) } - return i.newDocIDReader(results) + return is.newDocIDReader(results) } -func (i *IndexSnapshot) newDocIDReader(results chan *asynchSegmentResult) (index.DocIDReader, error) { +func (is *IndexSnapshot) newDocIDReader(results chan 
*asynchSegmentResult) (index.DocIDReader, error) { rv := &IndexSnapshotDocIDReader{ - snapshot: i, - iterators: make([]roaring.IntIterable, len(i.segment)), + snapshot: is, + iterators: make([]roaring.IntIterable, len(is.segment)), } var err error - for count := 0; count < len(i.segment); count++ { + for count := 0; count < len(is.segment); count++ { asr := <-results if asr.err != nil { if err == nil { @@ -354,11 +372,11 @@ func (i *IndexSnapshot) newDocIDReader(results chan *asynchSegmentResult) (index return rv, nil } -func (i *IndexSnapshot) Fields() ([]string, error) { +func (is *IndexSnapshot) Fields() ([]string, error) { // FIXME not making this concurrent for now as it's not used in hot path // of any searches at the moment (just a debug aid) fieldsMap := map[string]struct{}{} - for _, segment := range i.segment { + for _, segment := range is.segment { fields := segment.Fields() for _, field := range fields { fieldsMap[field] = struct{}{} @@ -371,21 +389,21 @@ func (i *IndexSnapshot) Fields() ([]string, error) { return rv, nil } -func (i *IndexSnapshot) GetInternal(key []byte) ([]byte, error) { - return i.internal[string(key)], nil +func (is *IndexSnapshot) GetInternal(key []byte) ([]byte, error) { + return is.internal[string(key)], nil } -func (i *IndexSnapshot) DocCount() (uint64, error) { +func (is *IndexSnapshot) DocCount() (uint64, error) { var rv uint64 - for _, segment := range i.segment { + for _, segment := range is.segment { rv += segment.Count() } return rv, nil } -func (i *IndexSnapshot) Document(id string) (rv index.Document, err error) { +func (is *IndexSnapshot) Document(id string) (rv index.Document, err error) { // FIXME could be done more efficiently directly, but reusing for simplicity - tfr, err := i.TermFieldReader([]byte(id), "_id", false, false, false) + tfr, err := is.TermFieldReader(nil, []byte(id), "_id", false, false, false) if err != nil { return nil, err } @@ -409,14 +427,20 @@ func (i *IndexSnapshot) Document(id string) (rv index.Document, err error) { if err != nil { return nil, err } - segmentIndex, localDocNum := i.segmentIndexAndLocalDocNumFromGlobal(docNum) + segmentIndex, localDocNum := is.segmentIndexAndLocalDocNumFromGlobal(docNum) rvd := document.NewDocument(id) - err = i.segment[segmentIndex].VisitDocument(localDocNum, func(name string, typ byte, val []byte, pos []uint64) bool { + + err = is.segment[segmentIndex].VisitDocument(localDocNum, func(name string, typ byte, val []byte, pos []uint64) bool { if name == "_id" { return true } + // track uncompressed stored fields bytes as part of IO stats. + // However, ideally we'd need to track the compressed on-disk value + // Keeping that TODO for now until we have a cleaner way. + rvd.StoredFieldsSize += uint64(len(val)) + // copy value, array positions to preserve them beyond the scope of this callback value := append([]byte(nil), val...) arrayPos := append([]uint64(nil), pos...) 
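// (Editor's note, not part of the vendored patch: the hunk below extends the
// stored-field type-tag switch; the full mapping in this release is 't' text,
// 'n' numeric, 'i' IP (new), 'd' datetime, 'b' boolean, 'g' geopoint and
// 's' geoshape (new). upsidedown's decodeFieldType near the end of this
// patch gains only the 'i' case, as geoshape support appears to be
// scorch-specific in this release.)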
@@ -426,12 +450,16 @@ func (i *IndexSnapshot) Document(id string) (rv index.Document, err error) { rvd.AddField(document.NewTextField(name, arrayPos, value)) case 'n': rvd.AddField(document.NewNumericFieldFromBytes(name, arrayPos, value)) + case 'i': + rvd.AddField(document.NewIPFieldFromBytes(name, arrayPos, value)) case 'd': rvd.AddField(document.NewDateTimeFieldFromBytes(name, arrayPos, value)) case 'b': rvd.AddField(document.NewBooleanFieldFromBytes(name, arrayPos, value)) case 'g': rvd.AddField(document.NewGeoPointFieldFromBytes(name, arrayPos, value)) + case 's': + rvd.AddField(document.NewGeoShapeFieldFromBytes(name, arrayPos, value)) } return true @@ -443,24 +471,24 @@ func (i *IndexSnapshot) Document(id string) (rv index.Document, err error) { return rvd, nil } -func (i *IndexSnapshot) segmentIndexAndLocalDocNumFromGlobal(docNum uint64) (int, uint64) { - segmentIndex := sort.Search(len(i.offsets), +func (is *IndexSnapshot) segmentIndexAndLocalDocNumFromGlobal(docNum uint64) (int, uint64) { + segmentIndex := sort.Search(len(is.offsets), func(x int) bool { - return i.offsets[x] > docNum + return is.offsets[x] > docNum }) - 1 - localDocNum := docNum - i.offsets[segmentIndex] + localDocNum := docNum - is.offsets[segmentIndex] return int(segmentIndex), localDocNum } -func (i *IndexSnapshot) ExternalID(id index.IndexInternalID) (string, error) { +func (is *IndexSnapshot) ExternalID(id index.IndexInternalID) (string, error) { docNum, err := docInternalToNumber(id) if err != nil { return "", err } - segmentIndex, localDocNum := i.segmentIndexAndLocalDocNumFromGlobal(docNum) + segmentIndex, localDocNum := is.segmentIndexAndLocalDocNumFromGlobal(docNum) - v, err := i.segment[segmentIndex].DocID(localDocNum) + v, err := is.segment[segmentIndex].DocID(localDocNum) if err != nil { return "", err } @@ -471,9 +499,9 @@ func (i *IndexSnapshot) ExternalID(id index.IndexInternalID) (string, error) { return string(v), nil } -func (i *IndexSnapshot) InternalID(id string) (rv index.IndexInternalID, err error) { +func (is *IndexSnapshot) InternalID(id string) (rv index.IndexInternalID, err error) { // FIXME could be done more efficiently directly, but reusing for simplicity - tfr, err := i.TermFieldReader([]byte(id), "_id", false, false, false) + tfr, err := is.TermFieldReader(nil, []byte(id), "_id", false, false, false) if err != nil { return nil, err } @@ -491,18 +519,19 @@ func (i *IndexSnapshot) InternalID(id string) (rv index.IndexInternalID, err err return next.ID, nil } -func (i *IndexSnapshot) TermFieldReader(term []byte, field string, includeFreq, +func (is *IndexSnapshot) TermFieldReader(ctx context.Context, term []byte, field string, includeFreq, includeNorm, includeTermVectors bool) (index.TermFieldReader, error) { - rv := i.allocTermFieldReaderDicts(field) + rv := is.allocTermFieldReaderDicts(field) + rv.ctx = ctx rv.term = term rv.field = field - rv.snapshot = i + rv.snapshot = is if rv.postings == nil { - rv.postings = make([]segment.PostingsList, len(i.segment)) + rv.postings = make([]segment.PostingsList, len(is.segment)) } if rv.iterators == nil { - rv.iterators = make([]segment.PostingsIterator, len(i.segment)) + rv.iterators = make([]segment.PostingsIterator, len(is.segment)) } rv.segmentOffset = 0 rv.includeFreq = includeFreq @@ -512,48 +541,87 @@ func (i *IndexSnapshot) TermFieldReader(term []byte, field string, includeFreq, rv.currID = rv.currID[:0] if rv.dicts == nil { - rv.dicts = make([]segment.TermDictionary, len(i.segment)) - for i, segment := range i.segment { - dict, err 
:= segment.segment.Dictionary(field) + rv.dicts = make([]segment.TermDictionary, len(is.segment)) + for i, s := range is.segment { + // the intention behind this compare and swap operation is + // to make sure that the accounting of the metadata is happening + // only once(which corresponds to this persisted segment's most + // recent segPlugin.Open() call), and any subsequent queries won't + // incur this cost which would essentially be a double counting. + if atomic.CompareAndSwapUint32(&s.mmaped, 1, 0) { + segBytesRead := s.segment.BytesRead() + rv.incrementBytesRead(segBytesRead) + } + dict, err := s.segment.Dictionary(field) if err != nil { return nil, err } + if dictStats, ok := dict.(segment.DiskStatsReporter); ok { + bytesRead := dictStats.BytesRead() + rv.incrementBytesRead(bytesRead) + } rv.dicts[i] = dict } } - for i, segment := range i.segment { - pl, err := rv.dicts[i].PostingsList(term, segment.deleted, rv.postings[i]) + for i, s := range is.segment { + var prevBytesReadPL uint64 + if rv.postings[i] != nil { + prevBytesReadPL = rv.postings[i].BytesRead() + } + pl, err := rv.dicts[i].PostingsList(term, s.deleted, rv.postings[i]) if err != nil { return nil, err } rv.postings[i] = pl + + var prevBytesReadItr uint64 + if rv.iterators[i] != nil { + prevBytesReadItr = rv.iterators[i].BytesRead() + } rv.iterators[i] = pl.Iterator(includeFreq, includeNorm, includeTermVectors, rv.iterators[i]) + + if bytesRead := rv.postings[i].BytesRead(); prevBytesReadPL < bytesRead { + rv.incrementBytesRead(bytesRead - prevBytesReadPL) + } + + if bytesRead := rv.iterators[i].BytesRead(); prevBytesReadItr < bytesRead { + rv.incrementBytesRead(bytesRead - prevBytesReadItr) + } } - atomic.AddUint64(&i.parent.stats.TotTermSearchersStarted, uint64(1)) + atomic.AddUint64(&is.parent.stats.TotTermSearchersStarted, uint64(1)) return rv, nil } -func (i *IndexSnapshot) allocTermFieldReaderDicts(field string) (tfr *IndexSnapshotTermFieldReader) { - i.m2.Lock() - if i.fieldTFRs != nil { - tfrs := i.fieldTFRs[field] +func (is *IndexSnapshot) allocTermFieldReaderDicts(field string) (tfr *IndexSnapshotTermFieldReader) { + is.m2.Lock() + if is.fieldTFRs != nil { + tfrs := is.fieldTFRs[field] last := len(tfrs) - 1 if last >= 0 { tfr = tfrs[last] tfrs[last] = nil - i.fieldTFRs[field] = tfrs[:last] - i.m2.Unlock() + is.fieldTFRs[field] = tfrs[:last] + is.m2.Unlock() return } } - i.m2.Unlock() + is.m2.Unlock() return &IndexSnapshotTermFieldReader{ recycle: true, } } -func (i *IndexSnapshot) recycleTermFieldReader(tfr *IndexSnapshotTermFieldReader) { +func (is *IndexSnapshot) getFieldTFRCacheThreshold() uint64 { + if is.parent.config != nil { + if _, ok := is.parent.config["FieldTFRCacheThreshold"]; ok { + return is.parent.config["FieldTFRCacheThreshold"].(uint64) + } + } + return DefaultFieldTFRCacheThreshold +} + +func (is *IndexSnapshot) recycleTermFieldReader(tfr *IndexSnapshotTermFieldReader) { if !tfr.recycle { // Do not recycle an optimized unadorned term field reader (used for // ConjunctionUnadorned or DisjunctionUnadorned), during when a fresh @@ -562,20 +630,23 @@ func (i *IndexSnapshot) recycleTermFieldReader(tfr *IndexSnapshotTermFieldReader return } - i.parent.rootLock.RLock() - obsolete := i.parent.root != i - i.parent.rootLock.RUnlock() + is.parent.rootLock.RLock() + obsolete := is.parent.root != is + is.parent.rootLock.RUnlock() if obsolete { // if we're not the current root (mutations happened), don't bother recycling return } - i.m2.Lock() - if i.fieldTFRs == nil { - i.fieldTFRs = 
map[string][]*IndexSnapshotTermFieldReader{} + is.m2.Lock() + if is.fieldTFRs == nil { + is.fieldTFRs = map[string][]*IndexSnapshotTermFieldReader{} + } + if uint64(len(is.fieldTFRs[tfr.field])) < is.getFieldTFRCacheThreshold() { + tfr.bytesRead = 0 + is.fieldTFRs[tfr.field] = append(is.fieldTFRs[tfr.field], tfr) } - i.fieldTFRs[tfr.field] = append(i.fieldTFRs[tfr.field], tfr) - i.m2.Unlock() + is.m2.Unlock() } func docNumberToBytes(buf []byte, in uint64) []byte { @@ -597,11 +668,11 @@ func docInternalToNumber(in index.IndexInternalID) (uint64, error) { return binary.BigEndian.Uint64(in), nil } -func (i *IndexSnapshot) documentVisitFieldTermsOnSegment( +func (is *IndexSnapshot) documentVisitFieldTermsOnSegment( segmentIndex int, localDocNum uint64, fields []string, cFields []string, visitor index.DocValueVisitor, dvs segment.DocVisitState) ( cFieldsOut []string, dvsOut segment.DocVisitState, err error) { - ss := i.segment[segmentIndex] + ss := is.segment[segmentIndex] var vFields []string // fields that are visitable via the segment @@ -656,9 +727,9 @@ func (i *IndexSnapshot) documentVisitFieldTermsOnSegment( return cFields, dvs, nil } -func (i *IndexSnapshot) DocValueReader(fields []string) ( +func (is *IndexSnapshot) DocValueReader(fields []string) ( index.DocValueReader, error) { - return &DocValueReader{i: i, fields: fields, currSegmentIndex: -1}, nil + return &DocValueReader{i: is, fields: fields, currSegmentIndex: -1}, nil } type DocValueReader struct { @@ -668,6 +739,13 @@ type DocValueReader struct { currSegmentIndex int currCachedFields []string + + totalBytesRead uint64 + bytesRead uint64 +} + +func (dvr *DocValueReader) BytesRead() uint64 { + return dvr.totalBytesRead + dvr.bytesRead } func (dvr *DocValueReader) VisitDocValues(id index.IndexInternalID, @@ -685,15 +763,20 @@ func (dvr *DocValueReader) VisitDocValues(id index.IndexInternalID, if dvr.currSegmentIndex != segmentIndex { dvr.currSegmentIndex = segmentIndex dvr.currCachedFields = nil + dvr.totalBytesRead += dvr.bytesRead + dvr.bytesRead = 0 } dvr.currCachedFields, dvr.dvs, err = dvr.i.documentVisitFieldTermsOnSegment( dvr.currSegmentIndex, localDocNum, dvr.fields, dvr.currCachedFields, visitor, dvr.dvs) + if dvr.dvs != nil { + dvr.bytesRead = dvr.dvs.BytesRead() + } return err } -func (i *IndexSnapshot) DumpAll() chan interface{} { +func (is *IndexSnapshot) DumpAll() chan interface{} { rv := make(chan interface{}) go func() { close(rv) @@ -701,7 +784,7 @@ func (i *IndexSnapshot) DumpAll() chan interface{} { return rv } -func (i *IndexSnapshot) DumpDoc(id string) chan interface{} { +func (is *IndexSnapshot) DumpDoc(id string) chan interface{} { rv := make(chan interface{}) go func() { close(rv) @@ -709,7 +792,7 @@ func (i *IndexSnapshot) DumpDoc(id string) chan interface{} { return rv } -func (i *IndexSnapshot) DumpFields() chan interface{} { +func (is *IndexSnapshot) DumpFields() chan interface{} { rv := make(chan interface{}) go func() { close(rv) @@ -717,10 +800,10 @@ func (i *IndexSnapshot) DumpFields() chan interface{} { return rv } -func (i *IndexSnapshot) diskSegmentsPaths() map[string]struct{} { - rv := make(map[string]struct{}, len(i.segment)) - for _, segmentSnapshot := range i.segment { - if seg, ok := segmentSnapshot.segment.(segment.PersistedSegment); ok { +func (is *IndexSnapshot) diskSegmentsPaths() map[string]struct{} { + rv := make(map[string]struct{}, len(is.segment)) + for _, s := range is.segment { + if seg, ok := s.segment.(segment.PersistedSegment); ok { rv[seg.Path()] = struct{}{} } } @@ -729,12 
+812,12 @@ func (i *IndexSnapshot) diskSegmentsPaths() map[string]struct{} { // reClaimableDocsRatio gives a ratio about the obsoleted or // reclaimable documents present in a given index snapshot. -func (i *IndexSnapshot) reClaimableDocsRatio() float64 { +func (is *IndexSnapshot) reClaimableDocsRatio() float64 { var totalCount, liveCount uint64 - for _, segmentSnapshot := range i.segment { - if _, ok := segmentSnapshot.segment.(segment.PersistedSegment); ok { - totalCount += uint64(segmentSnapshot.FullSize()) - liveCount += uint64(segmentSnapshot.Count()) + for _, s := range is.segment { + if _, ok := s.segment.(segment.PersistedSegment); ok { + totalCount += uint64(s.FullSize()) + liveCount += uint64(s.Count()) } } @@ -762,3 +845,63 @@ OUTER: } return rv } + +func (is *IndexSnapshot) CopyTo(d index.Directory) error { + // get the root bolt file. + w, err := d.GetWriter(filepath.Join("store", "root.bolt")) + if err != nil || w == nil { + return fmt.Errorf("failed to create the root.bolt file, err: %v", err) + } + rootFile, ok := w.(*os.File) + if !ok { + return fmt.Errorf("invalid root.bolt file found") + } + + copyBolt, err := bolt.Open(rootFile.Name(), 0600, nil) + if err != nil { + return err + } + defer func() { + w.Close() + if cerr := copyBolt.Close(); cerr != nil && err == nil { + err = cerr + } + }() + + // start a write transaction + tx, err := copyBolt.Begin(true) + if err != nil { + return err + } + + _, _, err = prepareBoltSnapshot(is, tx, "", is.parent.segPlugin, d) + if err != nil { + _ = tx.Rollback() + return fmt.Errorf("error backing up index snapshot: %v", err) + } + + // commit bolt data + err = tx.Commit() + if err != nil { + return fmt.Errorf("error commit tx to backup root bolt: %v", err) + } + + return copyBolt.Sync() +} + +func (is *IndexSnapshot) UpdateIOStats(val uint64) { + atomic.AddUint64(&is.parent.stats.TotBytesReadAtQueryTime, val) +} + +func (is *IndexSnapshot) GetSpatialAnalyzerPlugin(typ string) ( + index.SpatialAnalyzerPlugin, error) { + var rv index.SpatialAnalyzerPlugin + is.m.Lock() + rv = is.parent.spatialPlugin + is.m.Unlock() + + if rv == nil { + return nil, fmt.Errorf("no spatial plugin type: %s found", typ) + } + return rv, nil +} diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_dict.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_dict.go index 0a2bd23..658aa81 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_dict.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_dict.go @@ -28,9 +28,14 @@ type segmentDictCursor struct { } type IndexSnapshotFieldDict struct { - snapshot *IndexSnapshot - cursors []*segmentDictCursor - entry index.DictEntry + snapshot *IndexSnapshot + cursors []*segmentDictCursor + entry index.DictEntry + bytesRead uint64 +} + +func (i *IndexSnapshotFieldDict) BytesRead() uint64 { + return i.bytesRead } func (i *IndexSnapshotFieldDict) Len() int { return len(i.cursors) } diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_tfr.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_tfr.go index e983e3d..349620c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_tfr.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_index_tfr.go @@ -16,10 +16,12 @@ package scorch import ( "bytes" + "context" "fmt" "reflect" "sync/atomic" + "github.com/blevesearch/bleve/v2/search" "github.com/blevesearch/bleve/v2/size" index 
"github.com/blevesearch/bleve_index_api" segment "github.com/blevesearch/scorch_segment_api/v2" @@ -46,6 +48,12 @@ type IndexSnapshotTermFieldReader struct { currPosting segment.Posting currID index.IndexInternalID recycle bool + bytesRead uint64 + ctx context.Context +} + +func (i *IndexSnapshotTermFieldReader) incrementBytesRead(val uint64) { + i.bytesRead += val } func (i *IndexSnapshotTermFieldReader) Size() int { @@ -76,6 +84,7 @@ func (i *IndexSnapshotTermFieldReader) Next(preAlloced *index.TermFieldDoc) (*in } // find the next hit for i.segmentOffset < len(i.iterators) { + prevBytesRead := i.iterators[i.segmentOffset].BytesRead() next, err := i.iterators[i.segmentOffset].Next() if err != nil { return nil, err @@ -89,6 +98,14 @@ func (i *IndexSnapshotTermFieldReader) Next(preAlloced *index.TermFieldDoc) (*in i.currID = rv.ID i.currPosting = next + // postingsIterators is maintain the bytesRead stat in a cumulative fashion. + // this is because there are chances of having a series of loadChunk calls, + // and they have to be added together before sending the bytesRead at this point + // upstream. + if delta := i.iterators[i.segmentOffset].BytesRead() - prevBytesRead; delta > 0 { + i.incrementBytesRead(delta) + } + return rv, nil } i.segmentOffset++ @@ -129,7 +146,7 @@ func (i *IndexSnapshotTermFieldReader) Advance(ID index.IndexInternalID, preAllo // FIXME do something better // for now, if we need to seek backwards, then restart from the beginning if i.currPosting != nil && bytes.Compare(i.currID, ID) >= 0 { - i2, err := i.snapshot.TermFieldReader(i.term, i.field, + i2, err := i.snapshot.TermFieldReader(nil, i.term, i.field, i.includeFreq, i.includeNorm, i.includeTermVectors) if err != nil { return nil, err @@ -180,6 +197,15 @@ func (i *IndexSnapshotTermFieldReader) Count() uint64 { } func (i *IndexSnapshotTermFieldReader) Close() error { + if i.ctx != nil { + statsCallbackFn := i.ctx.Value(search.SearchIOStatsCallbackKey) + if statsCallbackFn != nil { + // essentially before you close the TFR, you must report this + // reader's bytesRead value + statsCallbackFn.(search.SearchIOStatsCallbackFunc)(i.bytesRead) + } + } + if i.snapshot != nil { atomic.AddUint64(&i.snapshot.parent.stats.TotTermSearchersFinished, uint64(1)) i.snapshot.recycleTermFieldReader(i) diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_segment.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_segment.go index e017eb2..0b76ec7 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_segment.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/snapshot_segment.go @@ -30,6 +30,11 @@ var TermSeparator byte = 0xff var TermSeparatorSplitSlice = []byte{TermSeparator} type SegmentSnapshot struct { + // this flag is needed to identify whether this + // segment was mmaped recently, in which case + // we consider the loading cost of the metadata + // as part of IO stats. 
+ mmaped uint32 id uint64 segment segment.Segment deleted *roaring.Bitmap @@ -54,7 +59,7 @@ func (s *SegmentSnapshot) FullSize() int64 { return int64(s.segment.Count()) } -func (s SegmentSnapshot) LiveSize() int64 { +func (s *SegmentSnapshot) LiveSize() int64 { return int64(s.Count()) } diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/stats.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/stats.go index 626fff2..d265e9d 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/stats.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/stats.go @@ -44,6 +44,9 @@ type Stats struct { TotIndexedPlainTextBytes uint64 + TotBytesReadAtQueryTime uint64 + TotBytesWrittenAtIndexTime uint64 + TotTermSearchersStarted uint64 TotTermSearchersFinished uint64 diff --git a/vendor/github.com/blevesearch/bleve/v2/index/scorch/unadorned.go b/vendor/github.com/blevesearch/bleve/v2/index/scorch/unadorned.go index 855b813..8221b23 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/scorch/unadorned.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/scorch/unadorned.go @@ -15,10 +15,11 @@ package scorch import ( - "github.com/RoaringBitmap/roaring" - segment "github.com/blevesearch/scorch_segment_api/v2" "math" "reflect" + + "github.com/RoaringBitmap/roaring" + segment "github.com/blevesearch/scorch_segment_api/v2" ) var reflectStaticSizeUnadornedPostingsIteratorBitmap int @@ -72,6 +73,16 @@ func (i *unadornedPostingsIteratorBitmap) Size() int { return reflectStaticSizeUnadornedPostingsIteratorBitmap } +func (i *unadornedPostingsIteratorBitmap) BytesRead() uint64 { + return 0 +} + +func (i *unadornedPostingsIteratorBitmap) BytesWritten() uint64 { + return 0 +} + +func (i *unadornedPostingsIteratorBitmap) ResetBytesRead(uint64) {} + func (i *unadornedPostingsIteratorBitmap) ActualBitmap() *roaring.Bitmap { return i.actualBM } @@ -132,6 +143,16 @@ func (i *unadornedPostingsIterator1Hit) Size() int { return reflectStaticSizeUnadornedPostingsIterator1Hit } +func (i *unadornedPostingsIterator1Hit) BytesRead() uint64 { + return 0 +} + +func (i *unadornedPostingsIterator1Hit) BytesWritten() uint64 { + return 0 +} + +func (i *unadornedPostingsIterator1Hit) ResetBytesRead(uint64) {} + func newUnadornedPostingsIteratorFrom1Hit(docNum1Hit uint64) segment.PostingsIterator { return &unadornedPostingsIterator1Hit{ docNum1Hit, diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_dict.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_dict.go index c4be577..4875680 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_dict.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/field_dict.go @@ -51,6 +51,10 @@ func newUpsideDownCouchFieldDict(indexReader *IndexReader, field uint16, startTe } +func (r *UpsideDownCouchFieldDict) BytesRead() uint64 { + return 0 +} + func (r *UpsideDownCouchFieldDict) Next() (*index.DictEntry, error) { key, val, valid := r.iterator.Current() if !valid { diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/index_reader.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/index_reader.go index ff0986d..5c164fc 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/index_reader.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/index_reader.go @@ -15,6 +15,7 @@ package upsidedown import ( + "context" "reflect" "github.com/blevesearch/bleve/v2/document" @@ -35,7 +36,7 @@ type IndexReader struct { docCount uint64 } -func (i *IndexReader) 
TermFieldReader(term []byte, fieldName string, includeFreq, includeNorm, includeTermVectors bool) (index.TermFieldReader, error) { +func (i *IndexReader) TermFieldReader(ctx context.Context, term []byte, fieldName string, includeFreq, includeNorm, includeTermVectors bool) (index.TermFieldReader, error) { fieldIndex, fieldExists := i.index.fieldCache.FieldNamed(fieldName, false) if fieldExists { return newUpsideDownCouchTermFieldReader(i, term, uint16(fieldIndex), includeFreq, includeNorm, includeTermVectors) @@ -223,3 +224,5 @@ func (dvr *DocValueReader) VisitDocValues(id index.IndexInternalID, visitor index.DocValueVisitor) error { return dvr.i.documentVisitFieldTerms(id, dvr.fields, visitor) } + +func (dvr *DocValueReader) BytesRead() uint64 { return 0 } diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/batch.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/batch.go index 7a58bb8..1536588 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/batch.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/batch.go @@ -15,8 +15,8 @@ package goleveldb import ( + "github.com/blevesearch/goleveldb/leveldb" store "github.com/blevesearch/upsidedown_store_api" - "github.com/syndtr/goleveldb/leveldb" ) type Batch struct { diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/config.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/config.go index 4baca70..376db83 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/config.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/config.go @@ -15,8 +15,8 @@ package goleveldb import ( - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/blevesearch/goleveldb/leveldb/filter" + "github.com/blevesearch/goleveldb/leveldb/opt" ) func applyConfig(o *opt.Options, config map[string]interface{}) (*opt.Options, error) { diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/iterator.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/iterator.go index e7be4e8..e8e002d 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/iterator.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/iterator.go @@ -14,7 +14,7 @@ package goleveldb -import "github.com/syndtr/goleveldb/leveldb/iterator" +import "github.com/blevesearch/goleveldb/leveldb/iterator" type Iterator struct { store *Store diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/reader.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/reader.go index 5841092..9fa3a0e 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/reader.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/reader.go @@ -15,9 +15,9 @@ package goleveldb import ( + "github.com/blevesearch/goleveldb/leveldb" + "github.com/blevesearch/goleveldb/leveldb/util" store "github.com/blevesearch/upsidedown_store_api" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/util" ) type Reader struct { diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/store.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/store.go index a4b49bb..6e8fbfd 100644 --- 
a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/store.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/store.go @@ -20,10 +20,10 @@ import ( "os" "github.com/blevesearch/bleve/v2/registry" + "github.com/blevesearch/goleveldb/leveldb" + "github.com/blevesearch/goleveldb/leveldb/opt" + "github.com/blevesearch/goleveldb/leveldb/util" store "github.com/blevesearch/upsidedown_store_api" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" ) const ( diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/writer.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/writer.go index 4746c94..8dfd30c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/writer.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/goleveldb/writer.go @@ -17,8 +17,8 @@ package goleveldb import ( "fmt" + "github.com/blevesearch/goleveldb/leveldb" store "github.com/blevesearch/upsidedown_store_api" - "github.com/syndtr/goleveldb/leveldb" ) type Writer struct { diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/iterator.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/iterator.go index 092ccf2..c03f75c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/iterator.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/iterator.go @@ -21,7 +21,7 @@ import ( "bytes" "sync" - "github.com/steveyen/gtreap" + "github.com/blevesearch/gtreap" ) type Iterator struct { diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/reader.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/reader.go index 34df813..52b05d7 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/reader.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/reader.go @@ -20,7 +20,7 @@ package gtreap import ( "github.com/blevesearch/upsidedown_store_api" - "github.com/steveyen/gtreap" + "github.com/blevesearch/gtreap" ) type Reader struct { diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/store.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/store.go index 8d1f588..3cc7eb9 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/store.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/store/gtreap/store.go @@ -25,8 +25,8 @@ import ( "sync" "github.com/blevesearch/bleve/v2/registry" + "github.com/blevesearch/gtreap" "github.com/blevesearch/upsidedown_store_api" - "github.com/steveyen/gtreap" ) const Name = "gtreap" diff --git a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.go b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.go index d67ee03..34c3319 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.go +++ b/vendor/github.com/blevesearch/bleve/v2/index/upsidedown/upsidedown.go @@ -727,6 +727,8 @@ func decodeFieldType(typ byte, name string, pos []uint64, value []byte) document return document.NewBooleanFieldFromBytes(name, pos, value) case 'g': return document.NewGeoPointFieldFromBytes(name, pos, value) + case 'i': + return document.NewIPFieldFromBytes(name, pos, value) } return nil } diff --git a/vendor/github.com/blevesearch/bleve/v2/index_alias_impl.go 
b/vendor/github.com/blevesearch/bleve/v2/index_alias_impl.go index 5a4dc5a..a73dd6b 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index_alias_impl.go +++ b/vendor/github.com/blevesearch/bleve/v2/index_alias_impl.go @@ -602,6 +602,10 @@ type indexAliasImplFieldDict struct { fieldDict index.FieldDict } +func (f *indexAliasImplFieldDict) BytesRead() uint64 { + return f.fieldDict.BytesRead() +} + func (f *indexAliasImplFieldDict) Next() (*index.DictEntry, error) { return f.fieldDict.Next() } diff --git a/vendor/github.com/blevesearch/bleve/v2/index_impl.go b/vendor/github.com/blevesearch/bleve/v2/index_impl.go index 879a366..c5a0c46 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index_impl.go +++ b/vendor/github.com/blevesearch/bleve/v2/index_impl.go @@ -18,12 +18,15 @@ import ( "context" "encoding/json" "fmt" + "io" "os" + "path/filepath" "sync" "sync/atomic" "time" "github.com/blevesearch/bleve/v2/document" + "github.com/blevesearch/bleve/v2/index/scorch" "github.com/blevesearch/bleve/v2/index/upsidedown" "github.com/blevesearch/bleve/v2/mapping" "github.com/blevesearch/bleve/v2/registry" @@ -466,7 +469,20 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr } }() - searcher, err := req.Query.Searcher(indexReader, i.m, search.SearcherOptions{ + // This callback and variable handle the tracking of bytes read: + // 1. those read while creating the tfr and during its Next() calls, + // accounted by invoking this callback when the TFR is closed. + // 2. the docvalues portion (accounted in the collector) and the retrieval + // of stored fields bytes (by LoadAndHighlightFields) + var totalBytesRead uint64 + sendBytesRead := func(bytesRead uint64) { + totalBytesRead += bytesRead + } + + ctx = context.WithValue(ctx, search.SearchIOStatsCallbackKey, + search.SearchIOStatsCallbackFunc(sendBytesRead)) + + searcher, err := req.Query.Searcher(ctx, indexReader, i.m, search.SearcherOptions{ Explain: req.Explain, IncludeTermVectors: req.IncludeLocations || req.Highlight != nil, Score: req.Score, @@ -478,6 +494,12 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr if serr := searcher.Close(); err == nil && serr != nil { err = serr } + if sr != nil { + sr.BytesRead = totalBytesRead + } + if sr, ok := indexReader.(*scorch.IndexSnapshot); ok { + sr.UpdateIOStats(totalBytesRead) + } }() if req.Facets != nil { @@ -556,10 +578,11 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr if i.name != "" { hit.Index = i.name } - err = LoadAndHighlightFields(hit, req, i.name, indexReader, highlighter) + err, storedFieldsBytes := LoadAndHighlightFields(hit, req, i.name, indexReader, highlighter) if err != nil { return nil, err } + totalBytesRead += storedFieldsBytes } atomic.AddUint64(&i.stats.searches, 1) @@ -598,9 +621,11 @@ func (i *indexImpl) SearchInContext(ctx context.Context, req *SearchRequest) (sr func LoadAndHighlightFields(hit *search.DocumentMatch, req *SearchRequest, indexName string, r index.IndexReader, - highlighter highlight.Highlighter) error { + highlighter highlight.Highlighter) (error, uint64) { + var totalStoredFieldsBytes uint64 if len(req.Fields) > 0 || highlighter != nil { doc, err := r.Document(hit.ID) + totalStoredFieldsBytes = doc.StoredFieldsBytes() if err == nil && doc != nil { if len(req.Fields) > 0 { fieldsToLoad := deDuplicate(req.Fields) @@ -634,7 +659,13 @@ func LoadAndHighlightFields(hit *search.DocumentMatch, req *SearchRequest, value = []float64{lon, lat} } } + case index.GeoShapeField: + v,
err := docF.GeoShape() + if err == nil { + value = v + } } + if value != nil { hit.AddFieldValue(docF.Name(), value) } @@ -658,11 +689,11 @@ func LoadAndHighlightFields(hit *search.DocumentMatch, req *SearchRequest, } else if doc == nil { // unexpected case, a doc ID that was found as a search hit // was unable to be found during document lookup - return ErrorIndexReadInconsistency + return ErrorIndexReadInconsistency, 0 } } - return nil + return nil, totalStoredFieldsBytes } // Fields returns the name of all the fields this @@ -862,6 +893,10 @@ type indexImplFieldDict struct { fieldDict index.FieldDict } +func (f *indexImplFieldDict) BytesRead() uint64 { + return f.fieldDict.BytesRead() +} + func (f *indexImplFieldDict) Next() (*index.DictEntry, error) { return f.fieldDict.Next() } @@ -910,3 +945,49 @@ func (m *searchHitSorter) Less(i, j int) bool { c := m.sort.Compare(m.cachedScoring, m.cachedDesc, m.hits[i], m.hits[j]) return c < 0 } + +func (i *indexImpl) CopyTo(d index.Directory) (err error) { + i.mutex.RLock() + defer i.mutex.RUnlock() + + if !i.open { + return ErrorIndexClosed + } + + indexReader, err := i.i.Reader() + if err != nil { + return err + } + defer func() { + if cerr := indexReader.Close(); err == nil && cerr != nil { + err = cerr + } + }() + + irc, ok := indexReader.(IndexCopyable) + if !ok { + return fmt.Errorf("index implementation does not support copy") + } + + err = irc.CopyTo(d) + if err != nil { + return fmt.Errorf("error copying index metadata: %v", err) + } + + // copy the metadata + return i.meta.CopyTo(d) +} + +func (f FileSystemDirectory) GetWriter(filePath string) (io.WriteCloser, + error) { + dir, file := filepath.Split(filePath) + if dir != "" { + err := os.MkdirAll(filepath.Join(string(f), dir), os.ModePerm) + if err != nil { + return nil, err + } + } + + return os.OpenFile(filepath.Join(string(f), dir, file), + os.O_RDWR|os.O_CREATE, 0600) +} diff --git a/vendor/github.com/blevesearch/bleve/v2/index_meta.go b/vendor/github.com/blevesearch/bleve/v2/index_meta.go index fe0ddeb..711e0c1 100644 --- a/vendor/github.com/blevesearch/bleve/v2/index_meta.go +++ b/vendor/github.com/blevesearch/bleve/v2/index_meta.go @@ -16,11 +16,13 @@ package bleve import ( "encoding/json" + "fmt" "io/ioutil" "os" "path/filepath" "github.com/blevesearch/bleve/v2/index/upsidedown" + index "github.com/blevesearch/bleve_index_api" ) const metaFilename = "index_meta.json" @@ -92,6 +94,23 @@ func (i *indexMeta) Save(path string) (err error) { return nil } +func (i *indexMeta) CopyTo(d index.Directory) (err error) { + metaBytes, err := json.Marshal(i) + if err != nil { + return err + } + + w, err := d.GetWriter(metaFilename) + if w == nil || err != nil { + return fmt.Errorf("invalid writer for file: %s, err: %v", + metaFilename, err) + } + defer w.Close() + + _, err = w.Write(metaBytes) + return err +} + func indexMetaPath(path string) string { return filepath.Join(path, metaFilename) } diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping.go b/vendor/github.com/blevesearch/bleve/v2/mapping.go index 5475307..723105a 100644 --- a/vendor/github.com/blevesearch/bleve/v2/mapping.go +++ b/vendor/github.com/blevesearch/bleve/v2/mapping.go @@ -45,6 +45,12 @@ func NewTextFieldMapping() *mapping.FieldMapping { return mapping.NewTextFieldMapping() } +// NewKeywordFieldMapping returns a field mapping for text using the keyword +// analyzer, which essentially doesn't apply any specific text analysis. 
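// A short usage sketch for the NewKeywordFieldMapping helper described above;
// the "sku" and "description" field names and values are illustrative. The
// keyword analyzer emits the entire value as a single token, so "sku" matches
// only exact values while "description" gets full text analysis.
package main

import (
	"fmt"
	"log"

	"github.com/blevesearch/bleve/v2"
)

func main() {
	doc := bleve.NewDocumentMapping()
	doc.AddFieldMappingsAt("sku", bleve.NewKeywordFieldMapping())
	doc.AddFieldMappingsAt("description", bleve.NewTextFieldMapping())

	im := bleve.NewIndexMapping()
	im.DefaultMapping = doc

	idx, err := bleve.NewMemOnly(im)
	if err != nil {
		log.Fatal(err)
	}
	defer idx.Close()

	if err := idx.Index("p1", map[string]interface{}{
		"sku": "A-42/B", "description": "red widget",
	}); err != nil {
		log.Fatal(err)
	}

	// term queries are not analyzed, so the exact stored token must be used
	q := bleve.NewTermQuery("A-42/B")
	q.SetField("sku")
	res, err := idx.Search(bleve.NewSearchRequest(q))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Total) // expect 1
}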
+func NewKeywordFieldMapping() *mapping.FieldMapping { + return mapping.NewKeywordFieldMapping() +} + // NewNumericFieldMapping returns a default field mapping for numbers func NewNumericFieldMapping() *mapping.FieldMapping { return mapping.NewNumericFieldMapping() @@ -63,3 +69,11 @@ func NewBooleanFieldMapping() *mapping.FieldMapping { func NewGeoPointFieldMapping() *mapping.FieldMapping { return mapping.NewGeoPointFieldMapping() } + +func NewGeoShapeFieldMapping() *mapping.FieldMapping { + return mapping.NewGeoShapeFieldMapping() +} + +func NewIPFieldMapping() *mapping.FieldMapping { + return mapping.NewIPFieldMapping() +} diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/document.go b/vendor/github.com/blevesearch/bleve/v2/mapping/document.go index 44911de..06b4990 100644 --- a/vendor/github.com/blevesearch/bleve/v2/mapping/document.go +++ b/vendor/github.com/blevesearch/bleve/v2/mapping/document.go @@ -18,6 +18,7 @@ import ( "encoding" "encoding/json" "fmt" + "net" "reflect" "time" @@ -76,7 +77,7 @@ func (dm *DocumentMapping) Validate(cache *registry.Cache) error { } } switch field.Type { - case "text", "datetime", "number", "boolean", "geopoint": + case "text", "datetime", "number", "boolean", "geopoint", "geoshape", "IP": default: return fmt.Errorf("unknown field type: '%s'", field.Type) } @@ -426,7 +427,9 @@ func (dm *DocumentMapping) processProperty(property interface{}, path []string, if subDocMapping != nil { // index by explicit mapping for _, fieldMapping := range subDocMapping.Fields { - if fieldMapping.Type == "geopoint" { + if fieldMapping.Type == "geoshape" { + fieldMapping.processGeoShape(property, pathString, path, indexes, context) + } else if fieldMapping.Type == "geopoint" { fieldMapping.processGeoPoint(property, pathString, path, indexes, context) } else { fieldMapping.processString(propertyValueString, pathString, path, indexes, context) @@ -509,6 +512,8 @@ func (dm *DocumentMapping) processProperty(property interface{}, path []string, for _, fieldMapping := range subDocMapping.Fields { if fieldMapping.Type == "geopoint" { fieldMapping.processGeoPoint(property, pathString, path, indexes, context) + } else if fieldMapping.Type == "geoshape" { + fieldMapping.processGeoShape(property, pathString, path, indexes, context) } } } @@ -517,8 +522,16 @@ func (dm *DocumentMapping) processProperty(property interface{}, path []string, case reflect.Map, reflect.Slice: if subDocMapping != nil { for _, fieldMapping := range subDocMapping.Fields { - if fieldMapping.Type == "geopoint" { + switch fieldMapping.Type { + case "geopoint": fieldMapping.processGeoPoint(property, pathString, path, indexes, context) + case "IP": + ip, ok := property.(net.IP) + if ok { + fieldMapping.processIP(ip, pathString, path, indexes, context) + } + case "geoshape": + fieldMapping.processGeoShape(property, pathString, path, indexes, context) } } } @@ -528,7 +541,7 @@ func (dm *DocumentMapping) processProperty(property interface{}, path []string, switch property := property.(type) { case encoding.TextMarshaler: // ONLY process TextMarshaler if there is an explicit mapping - // AND all of the fiels are of type text + // AND all of the fields are of type text // OTHERWISE process field without TextMarshaler if subDocMapping != nil { allFieldsText := true diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/field.go b/vendor/github.com/blevesearch/bleve/v2/mapping/field.go index 7ad1744..511782a 100644 --- a/vendor/github.com/blevesearch/bleve/v2/mapping/field.go +++ 
b/vendor/github.com/blevesearch/bleve/v2/mapping/field.go @@ -17,8 +17,10 @@ package mapping import ( "encoding/json" "fmt" + "net" "time" + "github.com/blevesearch/bleve/v2/analysis/analyzer/keyword" index "github.com/blevesearch/bleve_index_api" "github.com/blevesearch/bleve/v2/analysis" @@ -89,6 +91,19 @@ func newTextFieldMappingDynamic(im *IndexMappingImpl) *FieldMapping { return rv } +// NewKeywordFieldMapping returns a default field mapping for text with analyzer "keyword". +func NewKeywordFieldMapping() *FieldMapping { + return &FieldMapping{ + Type: "text", + Analyzer: keyword.Name, + Store: true, + Index: true, + IncludeTermVectors: true, + IncludeInAll: true, + DocValues: true, + } +} + // NewNumericFieldMapping returns a default field mapping for numbers func NewNumericFieldMapping() *FieldMapping { return &FieldMapping{ @@ -157,6 +172,28 @@ func NewGeoPointFieldMapping() *FieldMapping { } } +// NewGeoShapeFieldMapping returns a default field mapping +// for geoshapes +func NewGeoShapeFieldMapping() *FieldMapping { + return &FieldMapping{ + Type: "geoshape", + Store: true, + Index: true, + IncludeInAll: true, + DocValues: true, + } +} + +// NewIPFieldMapping returns a default field mapping for IP addresses +func NewIPFieldMapping() *FieldMapping { + return &FieldMapping{ + Type: "IP", + Store: true, + Index: true, + IncludeInAll: true, + } +} + // Options returns the indexing options for this field. func (fm *FieldMapping) Options() index.FieldIndexingOptions { var rv index.FieldIndexingOptions @@ -201,6 +238,11 @@ func (fm *FieldMapping) processString(propertyValueString string, pathString str fm.processTime(parsedDateTime, pathString, path, indexes, context) } } + } else if fm.Type == "IP" { + ip := net.ParseIP(propertyValueString) + if ip != nil { + fm.processIP(ip, pathString, path, indexes, context) + } } } @@ -261,7 +303,67 @@ func (fm *FieldMapping) processGeoPoint(propertyMightBeGeoPoint interface{}, pat } } -func (fm *FieldMapping) analyzerForField(path []string, context *walkContext) *analysis.Analyzer { +func (fm *FieldMapping) processIP(ip net.IP, pathString string, path []string, indexes []uint64, context *walkContext) { + fieldName := getFieldName(pathString, path, fm) + options := fm.Options() + field := document.NewIPFieldWithIndexingOptions(fieldName, indexes, ip, options) + context.doc.AddField(field) + + if !fm.IncludeInAll { + context.excludedFromAll = append(context.excludedFromAll, fieldName) + } +} + +func (fm *FieldMapping) processGeoShape(propertyMightBeGeoShape interface{}, + pathString string, path []string, indexes []uint64, context *walkContext) { + coordValue, shape, err := geo.ParseGeoShapeField(propertyMightBeGeoShape) + if err != nil { + return + } + + if shape == geo.CircleType { + center, radius, found := geo.ExtractCircle(propertyMightBeGeoShape) + if found { + fieldName := getFieldName(pathString, path, fm) + options := fm.Options() + field := document.NewGeoCircleFieldWithIndexingOptions(fieldName, + indexes, center, radius, options) + context.doc.AddField(field) + + if !fm.IncludeInAll { + context.excludedFromAll = append(context.excludedFromAll, fieldName) + } + } + } else if shape == geo.GeometryCollectionType { + coordinates, shapes, found := geo.ExtractGeometryCollection(propertyMightBeGeoShape) + if found { + fieldName := getFieldName(pathString, path, fm) + options := fm.Options() + field := document.NewGeometryCollectionFieldWithIndexingOptions(fieldName, + indexes, coordinates, shapes, options) + context.doc.AddField(field) + + if
!fm.IncludeInAll { + context.excludedFromAll = append(context.excludedFromAll, fieldName) + } + } + } else { + coordinates, shape, found := geo.ExtractGeoShapeCoordinates(coordValue, shape) + if found { + fieldName := getFieldName(pathString, path, fm) + options := fm.Options() + field := document.NewGeoShapeFieldWithIndexingOptions(fieldName, + indexes, coordinates, shape, options) + context.doc.AddField(field) + + if !fm.IncludeInAll { + context.excludedFromAll = append(context.excludedFromAll, fieldName) + } + } + } +} + +func (fm *FieldMapping) analyzerForField(path []string, context *walkContext) analysis.Analyzer { analyzerName := fm.Analyzer if analyzerName == "" { analyzerName = context.dm.defaultAnalyzerName(path) diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/index.go b/vendor/github.com/blevesearch/bleve/v2/mapping/index.go index c343433..f97442f 100644 --- a/vendor/github.com/blevesearch/bleve/v2/mapping/index.go +++ b/vendor/github.com/blevesearch/bleve/v2/mapping/index.go @@ -386,7 +386,7 @@ func (im *IndexMappingImpl) AnalyzerNameForPath(path string) string { return im.DefaultAnalyzer } -func (im *IndexMappingImpl) AnalyzerNamed(name string) *analysis.Analyzer { +func (im *IndexMappingImpl) AnalyzerNamed(name string) analysis.Analyzer { analyzer, err := im.cache.AnalyzerNamed(name) if err != nil { logger.Printf("error using analyzer named: %s", name) diff --git a/vendor/github.com/blevesearch/bleve/v2/mapping/mapping.go b/vendor/github.com/blevesearch/bleve/v2/mapping/mapping.go index bcf6749..2506352 100644 --- a/vendor/github.com/blevesearch/bleve/v2/mapping/mapping.go +++ b/vendor/github.com/blevesearch/bleve/v2/mapping/mapping.go @@ -54,5 +54,5 @@ type IndexMapping interface { DefaultSearchField() string AnalyzerNameForPath(path string) string - AnalyzerNamed(name string) *analysis.Analyzer + AnalyzerNamed(name string) analysis.Analyzer } diff --git a/vendor/github.com/blevesearch/bleve/v2/query.go b/vendor/github.com/blevesearch/bleve/v2/query.go index 91d2d5f..4f1f136 100644 --- a/vendor/github.com/blevesearch/bleve/v2/query.go +++ b/vendor/github.com/blevesearch/bleve/v2/query.go @@ -216,3 +216,12 @@ func NewGeoBoundingBoxQuery(topLeftLon, topLeftLat, bottomRightLon, bottomRightL func NewGeoDistanceQuery(lon, lat float64, distance string) *query.GeoDistanceQuery { return query.NewGeoDistanceQuery(lon, lat, distance) } + +// NewIPRangeQuery creates a new Query for matching IP addresses. +// If the argument is in CIDR format, then the query will match all +// IP addresses in the network specified. If the argument is an IP address, +// then the query will return documents which contain that IP. +// Both ipv4 and ipv6 are supported. 
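// A usage sketch tying together the IP support in this change:
// NewIPFieldMapping (mapping/field.go above) plus the NewIPRangeQuery helper
// whose definition follows just below. The "addr" field and the sample
// addresses are illustrative; string values are parsed via net.ParseIP at
// index time.
package main

import (
	"fmt"
	"log"

	"github.com/blevesearch/bleve/v2"
)

func main() {
	doc := bleve.NewDocumentMapping()
	doc.AddFieldMappingsAt("addr", bleve.NewIPFieldMapping())

	im := bleve.NewIndexMapping()
	im.DefaultMapping = doc

	idx, err := bleve.NewMemOnly(im)
	if err != nil {
		log.Fatal(err)
	}
	defer idx.Close()

	_ = idx.Index("h1", map[string]interface{}{"addr": "192.168.1.21"})
	_ = idx.Index("h2", map[string]interface{}{"addr": "10.0.0.1"})

	// a CIDR argument matches every document whose addr lies in the network;
	// a plain IP argument matches documents containing exactly that IP
	q := bleve.NewIPRangeQuery("192.168.1.0/24")
	q.SetField("addr")
	res, err := idx.Search(bleve.NewSearchRequest(q))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Total) // expect 1 (h1 only)
}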
+func NewIPRangeQuery(cidr string) *query.IPRangeQuery { + return query.NewIPRangeQuery(cidr) +} diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/analyzer.go b/vendor/github.com/blevesearch/bleve/v2/registry/analyzer.go index 6c0d73b..f4753bc 100644 --- a/vendor/github.com/blevesearch/bleve/v2/registry/analyzer.go +++ b/vendor/github.com/blevesearch/bleve/v2/registry/analyzer.go @@ -28,7 +28,7 @@ func RegisterAnalyzer(name string, constructor AnalyzerConstructor) { analyzers[name] = constructor } -type AnalyzerConstructor func(config map[string]interface{}, cache *Cache) (*analysis.Analyzer, error) +type AnalyzerConstructor func(config map[string]interface{}, cache *Cache) (analysis.Analyzer, error) type AnalyzerRegistry map[string]AnalyzerConstructor type AnalyzerCache struct { @@ -53,15 +53,15 @@ func AnalyzerBuild(name string, config map[string]interface{}, cache *Cache) (in return analyzer, nil } -func (c *AnalyzerCache) AnalyzerNamed(name string, cache *Cache) (*analysis.Analyzer, error) { +func (c *AnalyzerCache) AnalyzerNamed(name string, cache *Cache) (analysis.Analyzer, error) { item, err := c.ItemNamed(name, cache, AnalyzerBuild) if err != nil { return nil, err } - return item.(*analysis.Analyzer), nil + return item.(analysis.Analyzer), nil } -func (c *AnalyzerCache) DefineAnalyzer(name string, typ string, config map[string]interface{}, cache *Cache) (*analysis.Analyzer, error) { +func (c *AnalyzerCache) DefineAnalyzer(name string, typ string, config map[string]interface{}, cache *Cache) (analysis.Analyzer, error) { item, err := c.DefineItem(name, typ, config, cache, AnalyzerBuild) if err != nil { if err == ErrAlreadyDefined { @@ -69,7 +69,7 @@ func (c *AnalyzerCache) DefineAnalyzer(name string, typ string, config map[strin } return nil, err } - return item.(*analysis.Analyzer), nil + return item.(analysis.Analyzer), nil } func AnalyzerTypesAndInstances() ([]string, []string) { diff --git a/vendor/github.com/blevesearch/bleve/v2/registry/registry.go b/vendor/github.com/blevesearch/bleve/v2/registry/registry.go index 02125e6..1954d08 100644 --- a/vendor/github.com/blevesearch/bleve/v2/registry/registry.go +++ b/vendor/github.com/blevesearch/bleve/v2/registry/registry.go @@ -123,11 +123,11 @@ func (c *Cache) DefineTokenFilter(name string, config map[string]interface{}) (a return c.TokenFilters.DefineTokenFilter(name, typ, config, c) } -func (c *Cache) AnalyzerNamed(name string) (*analysis.Analyzer, error) { +func (c *Cache) AnalyzerNamed(name string) (analysis.Analyzer, error) { return c.Analyzers.AnalyzerNamed(name, c) } -func (c *Cache) DefineAnalyzer(name string, config map[string]interface{}) (*analysis.Analyzer, error) { +func (c *Cache) DefineAnalyzer(name string, config map[string]interface{}) (analysis.Analyzer, error) { typ, err := typeFromConfig(config) if err != nil { return nil, err diff --git a/vendor/github.com/blevesearch/bleve/v2/search.go b/vendor/github.com/blevesearch/bleve/v2/search.go index 7397f56..4cb4b6b 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search.go +++ b/vendor/github.com/blevesearch/bleve/v2/search.go @@ -486,13 +486,14 @@ func (ss *SearchStatus) Merge(other *SearchStatus) { // A SearchResult describes the results of executing // a SearchRequest. 
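// The registry changes above relax *analysis.Analyzer (a struct) to the
// analysis.Analyzer interface. A sketch of what that enables, assuming the
// interface's single Analyze([]byte) analysis.TokenStream method in this
// release: a custom type can satisfy the interface directly, without the
// char-filter/tokenizer/token-filter pipeline. The "upper" analyzer here is
// purely illustrative.
package customanalyzer

import (
	"bytes"

	"github.com/blevesearch/bleve/v2/analysis"
	"github.com/blevesearch/bleve/v2/registry"
)

// upperAnalyzer emits the whole input as one upper-cased token.
type upperAnalyzer struct{}

func (upperAnalyzer) Analyze(input []byte) analysis.TokenStream {
	return analysis.TokenStream{
		&analysis.Token{
			Term:     bytes.ToUpper(input),
			Position: 1,
			Start:    0,
			End:      len(input),
			Type:     analysis.AlphaNumeric,
		},
	}
}

func init() {
	// constructors now return the analysis.Analyzer interface type
	registry.RegisterAnalyzer("upper",
		func(config map[string]interface{}, cache *registry.Cache) (analysis.Analyzer, error) {
			return upperAnalyzer{}, nil
		})
}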
type SearchResult struct { - Status *SearchStatus `json:"status"` - Request *SearchRequest `json:"request"` - Hits search.DocumentMatchCollection `json:"hits"` - Total uint64 `json:"total_hits"` - MaxScore float64 `json:"max_score"` - Took time.Duration `json:"took"` - Facets search.FacetResults `json:"facets"` + Status *SearchStatus `json:"status"` + Request *SearchRequest `json:"request"` + Hits search.DocumentMatchCollection `json:"hits"` + Total uint64 `json:"total_hits"` + BytesRead uint64 `json:"bytesRead,omitempty"` + MaxScore float64 `json:"max_score"` + Took time.Duration `json:"took"` + Facets search.FacetResults `json:"facets"` } func (sr *SearchResult) Size() int { @@ -543,9 +544,15 @@ func (sr *SearchResult) String() string { rv += fmt.Sprintf("Facets:\n") for fn, f := range sr.Facets { rv += fmt.Sprintf("%s(%d)\n", fn, f.Total) - for _, t := range f.Terms { + for _, t := range f.Terms.Terms() { rv += fmt.Sprintf("\t%s(%d)\n", t.Term, t.Count) } + for _, n := range f.NumericRanges { + rv += fmt.Sprintf("\t%s(%d)\n", n.Name, n.Count) + } + for _, d := range f.DateRanges { + rv += fmt.Sprintf("\t%s(%d)\n", d.Name, d.Count) + } if f.Other != 0 { rv += fmt.Sprintf("\tOther(%d)\n", f.Other) } @@ -559,6 +566,7 @@ func (sr *SearchResult) Merge(other *SearchResult) { sr.Status.Merge(other.Status) sr.Hits = append(sr.Hits, other.Hits...) sr.Total += other.Total + sr.BytesRead += other.BytesRead if other.MaxScore > sr.MaxScore { sr.MaxScore = other.MaxScore } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/collector/topn.go b/vendor/github.com/blevesearch/bleve/v2/search/collector/topn.go index aa1d65b..4d19cd4 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/collector/topn.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/collector/topn.go @@ -54,6 +54,7 @@ type TopNCollector struct { size int skip int total uint64 + bytesRead uint64 maxScore float64 took time.Duration sort search.SortOrder @@ -83,7 +84,7 @@ func NewTopNCollector(size int, skip int, sort search.SortOrder) *TopNCollector return newTopNCollector(size, skip, sort) } -// NewTopNCollector builds a collector to find the top 'size' hits +// NewTopNCollectorAfter builds a collector to find the top 'size' hits // skipping over the first 'skip' hits // ordering hits by the provided sort order func NewTopNCollectorAfter(size int, sort search.SortOrder, after []string) *TopNCollector { @@ -197,7 +198,6 @@ func (hc *TopNCollector) Collect(ctx context.Context, searcher search.Searcher, } hc.needDocIds = hc.needDocIds || loadID - select { case <-ctx.Done(): return ctx.Err() @@ -226,6 +226,14 @@ func (hc *TopNCollector) Collect(ctx context.Context, searcher search.Searcher, next, err = searcher.Next(searchContext) } + statsCallbackFn := ctx.Value(search.SearchIOStatsCallbackKey) + if statsCallbackFn != nil { + // hc.bytesRead corresponds to the total bytes read + // as part of the docValues being read for every hit, + // which must be accounted by invoking the callback. + statsCallbackFn.(search.SearchIOStatsCallbackFunc)(hc.bytesRead) + } + // help finalize/flush the results in case // of custom document match handlers.
err = dmHandler(nil) @@ -235,9 +243,7 @@ func (hc *TopNCollector) Collect(ctx context.Context, searcher search.Searcher, // compute search duration hc.took = time.Since(startTime) - if err != nil { - return err - } + // finalize actual results err = hc.finalizeResults(reader) if err != nil { @@ -353,6 +359,8 @@ func (hc *TopNCollector) visitFieldTerms(reader index.IndexReader, d *search.Doc hc.facetsBuilder.EndDoc() } + hc.bytesRead += hc.dvReader.BytesRead() + return err } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_datetime.go b/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_datetime.go index a316ee4..ff5167f 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_datetime.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_datetime.go @@ -87,23 +87,21 @@ func (fb *DateTimeFacetBuilder) Field() string { return fb.field } -func (fb *DateTimeFacetBuilder) UpdateVisitor(field string, term []byte) { - if field == fb.field { - fb.sawValue = true - // only consider the values which are shifted 0 - prefixCoded := numeric.PrefixCoded(term) - shift, err := prefixCoded.Shift() - if err == nil && shift == 0 { - i64, err := prefixCoded.Int64() - if err == nil { - t := time.Unix(0, i64) - - // look at each of the ranges for a match - for rangeName, r := range fb.ranges { - if (r.start.IsZero() || t.After(r.start) || t.Equal(r.start)) && (r.end.IsZero() || t.Before(r.end)) { - fb.termsCount[rangeName] = fb.termsCount[rangeName] + 1 - fb.total++ - } +func (fb *DateTimeFacetBuilder) UpdateVisitor(term []byte) { + fb.sawValue = true + // only consider the values which are shifted 0 + prefixCoded := numeric.PrefixCoded(term) + shift, err := prefixCoded.Shift() + if err == nil && shift == 0 { + i64, err := prefixCoded.Int64() + if err == nil { + t := time.Unix(0, i64) + + // look at each of the ranges for a match + for rangeName, r := range fb.ranges { + if (r.start.IsZero() || t.After(r.start) || t.Equal(r.start)) && (r.end.IsZero() || t.Before(r.end)) { + fb.termsCount[rangeName] = fb.termsCount[rangeName] + 1 + fb.total++ } } } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_numeric.go b/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_numeric.go index 6d0c6c9..f19634d 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_numeric.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_numeric.go @@ -86,23 +86,21 @@ func (fb *NumericFacetBuilder) Field() string { return fb.field } -func (fb *NumericFacetBuilder) UpdateVisitor(field string, term []byte) { - if field == fb.field { - fb.sawValue = true - // only consider the values which are shifted 0 - prefixCoded := numeric.PrefixCoded(term) - shift, err := prefixCoded.Shift() - if err == nil && shift == 0 { - i64, err := prefixCoded.Int64() - if err == nil { - f64 := numeric.Int64ToFloat64(i64) - - // look at each of the ranges for a match - for rangeName, r := range fb.ranges { - if (r.min == nil || f64 >= *r.min) && (r.max == nil || f64 < *r.max) { - fb.termsCount[rangeName] = fb.termsCount[rangeName] + 1 - fb.total++ - } +func (fb *NumericFacetBuilder) UpdateVisitor(term []byte) { + fb.sawValue = true + // only consider the values which are shifted 0 + prefixCoded := numeric.PrefixCoded(term) + shift, err := prefixCoded.Shift() + if err == nil && shift == 0 { + i64, err := prefixCoded.Int64() + if err == nil { + f64 := numeric.Int64ToFloat64(i64) + + // look at 
each of the ranges for a match + for rangeName, r := range fb.ranges { + if (r.min == nil || f64 >= *r.min) && (r.max == nil || f64 < *r.max) { + fb.termsCount[rangeName] = fb.termsCount[rangeName] + 1 + fb.total++ } } } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_terms.go b/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_terms.go index 1b378db..c5a1c83 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_terms.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/facet/facet_builder_terms.go @@ -62,12 +62,10 @@ func (fb *TermsFacetBuilder) Field() string { return fb.field } -func (fb *TermsFacetBuilder) UpdateVisitor(field string, term []byte) { - if field == fb.field { - fb.sawValue = true - fb.termsCount[string(term)] = fb.termsCount[string(term)] + 1 - fb.total++ - } +func (fb *TermsFacetBuilder) UpdateVisitor(term []byte) { + fb.sawValue = true + fb.termsCount[string(term)] = fb.termsCount[string(term)] + 1 + fb.total++ } func (fb *TermsFacetBuilder) StartDoc() { @@ -87,7 +85,7 @@ func (fb *TermsFacetBuilder) Result() *search.FacetResult { Missing: fb.missing, } - rv.Terms = make([]*search.TermFacet, 0, len(fb.termsCount)) + rv.Terms = &search.TermFacets{} for term, count := range fb.termsCount { tf := &search.TermFacet{ @@ -95,20 +93,20 @@ func (fb *TermsFacetBuilder) Result() *search.FacetResult { Count: count, } - rv.Terms = append(rv.Terms, tf) + rv.Terms.Add(tf) } sort.Sort(rv.Terms) // we now have the list of the top N facets trimTopN := fb.size - if trimTopN > len(rv.Terms) { - trimTopN = len(rv.Terms) + if trimTopN > rv.Terms.Len() { + trimTopN = rv.Terms.Len() } - rv.Terms = rv.Terms[:trimTopN] + rv.Terms.TrimToTopN(trimTopN) notOther := 0 - for _, tf := range rv.Terms { + for _, tf := range rv.Terms.Terms() { notOther += tf.Count } rv.Other = fb.total - notOther diff --git a/vendor/github.com/blevesearch/bleve/v2/search/facets_builder.go b/vendor/github.com/blevesearch/bleve/v2/search/facets_builder.go index 9822257..de5a157 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/facets_builder.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/facets_builder.go @@ -15,6 +15,7 @@ package search import ( + "encoding/json" "reflect" "sort" @@ -43,7 +44,7 @@ func init() { type FacetBuilder interface { StartDoc() - UpdateVisitor(field string, term []byte) + UpdateVisitor(term []byte) EndDoc() Result() *FacetResult @@ -53,10 +54,11 @@ type FacetBuilder interface { } type FacetsBuilder struct { - indexReader index.IndexReader - facetNames []string - facets []FacetBuilder - fields []string + indexReader index.IndexReader + facetNames []string + facets []FacetBuilder + facetsByField map[string][]FacetBuilder + fields []string } func NewFacetsBuilder(indexReader index.IndexReader) *FacetsBuilder { @@ -80,8 +82,13 @@ func (fb *FacetsBuilder) Size() int { } func (fb *FacetsBuilder) Add(name string, facetBuilder FacetBuilder) { + if fb.facetsByField == nil { + fb.facetsByField = map[string][]FacetBuilder{} + } + fb.facetNames = append(fb.facetNames, name) fb.facets = append(fb.facets, facetBuilder) + fb.facetsByField[facetBuilder.Field()] = append(fb.facetsByField[facetBuilder.Field()], facetBuilder) fb.fields = append(fb.fields, facetBuilder.Field()) } @@ -102,8 +109,10 @@ func (fb *FacetsBuilder) EndDoc() { } func (fb *FacetsBuilder) UpdateVisitor(field string, term []byte) { - for _, facetBuilder := range fb.facets { - facetBuilder.UpdateVisitor(field, term) + if facetBuilders, ok := 
fb.facetsByField[field]; ok { + for _, facetBuilder := range facetBuilders { + facetBuilder.UpdateVisitor(term) + } } } @@ -112,27 +121,76 @@ type TermFacet struct { Count int `json:"count"` } -type TermFacets []*TermFacet +type TermFacets struct { + termFacets []*TermFacet + termLookup map[string]*TermFacet +} -func (tf TermFacets) Add(termFacet *TermFacet) TermFacets { - for _, existingTerm := range tf { - if termFacet.Term == existingTerm.Term { - existingTerm.Count += termFacet.Count - return tf +func (tf *TermFacets) Terms() []*TermFacet { + if tf == nil { + return []*TermFacet{} + } + return tf.termFacets +} + +func (tf *TermFacets) TrimToTopN(n int) { + tf.termFacets = tf.termFacets[:n] +} + +func (tf *TermFacets) Add(termFacets ...*TermFacet) { + for _, termFacet := range termFacets { + if tf.termLookup == nil { + tf.termLookup = map[string]*TermFacet{} } + + if term, ok := tf.termLookup[termFacet.Term]; ok { + term.Count += termFacet.Count + return + } + + // if we got here it wasn't already in the existing terms + tf.termFacets = append(tf.termFacets, termFacet) + tf.termLookup[termFacet.Term] = termFacet } - // if we got here it wasn't already in the existing terms - tf = append(tf, termFacet) - return tf } -func (tf TermFacets) Len() int { return len(tf) } -func (tf TermFacets) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } -func (tf TermFacets) Less(i, j int) bool { - if tf[i].Count == tf[j].Count { - return tf[i].Term < tf[j].Term +func (tf *TermFacets) Len() int { + // Handle case where *TermFacets is not fully initialized in index_impl.go.init() + if tf == nil { + return 0 } - return tf[i].Count > tf[j].Count + + return len(tf.termFacets) +} +func (tf *TermFacets) Swap(i, j int) { + tf.termFacets[i], tf.termFacets[j] = tf.termFacets[j], tf.termFacets[i] +} +func (tf *TermFacets) Less(i, j int) bool { + if tf.termFacets[i].Count == tf.termFacets[j].Count { + return tf.termFacets[i].Term < tf.termFacets[j].Term + } + return tf.termFacets[i].Count > tf.termFacets[j].Count +} + +// TermFacets used to be a type alias for []*TermFacet. +// To maintain backwards compatibility, we have to implement custom +// JSON marshalling. 
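// A sketch of the backwards-compatible behaviour described above: Add
// de-duplicates terms through the lookup map, and the MarshalJSON defined just
// below still emits the plain JSON array that the old []*TermFacet slice
// produced on the wire.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blevesearch/bleve/v2/search"
)

func main() {
	tf := &search.TermFacets{}
	tf.Add(&search.TermFacet{Term: "go", Count: 2})
	tf.Add(&search.TermFacet{Term: "go", Count: 3}) // merged into the existing entry

	b, err := json.Marshal(tf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // [{"term":"go","count":5}]
}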
+func (tf *TermFacets) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.termFacets) +} + +func (tf *TermFacets) UnmarshalJSON(b []byte) error { + termFacets := []*TermFacet{} + err := json.Unmarshal(b, &termFacets) + if err != nil { + return err + } + + for _, termFacet := range termFacets { + tf.Add(termFacet) + } + + return nil } type NumericRangeFacet struct { @@ -246,7 +304,7 @@ type FacetResult struct { Total int `json:"total"` Missing int `json:"missing"` Other int `json:"other"` - Terms TermFacets `json:"terms,omitempty"` + Terms *TermFacets `json:"terms,omitempty"` NumericRanges NumericRangeFacets `json:"numeric_ranges,omitempty"` DateRanges DateRangeFacets `json:"date_ranges,omitempty"` } @@ -254,7 +312,7 @@ type FacetResult struct { func (fr *FacetResult) Size() int { return reflectStaticSizeFacetResult + size.SizeOfPtr + len(fr.Field) + - len(fr.Terms)*(reflectStaticSizeTermFacet+size.SizeOfPtr) + + fr.Terms.Len()*(reflectStaticSizeTermFacet+size.SizeOfPtr) + len(fr.NumericRanges)*(reflectStaticSizeNumericRangeFacet+size.SizeOfPtr) + len(fr.DateRanges)*(reflectStaticSizeDateRangeFacet+size.SizeOfPtr) } @@ -264,8 +322,8 @@ func (fr *FacetResult) Merge(other *FacetResult) { fr.Missing += other.Missing fr.Other += other.Other if fr.Terms != nil && other.Terms != nil { - for _, term := range other.Terms { - fr.Terms = fr.Terms.Add(term) + for _, term := range other.Terms.termFacets { + fr.Terms.Add(term) } } if fr.NumericRanges != nil && other.NumericRanges != nil { @@ -283,12 +341,12 @@ func (fr *FacetResult) Merge(other *FacetResult) { func (fr *FacetResult) Fixup(size int) { if fr.Terms != nil { sort.Sort(fr.Terms) - if len(fr.Terms) > size { - moveToOther := fr.Terms[size:] + if fr.Terms.Len() > size { + moveToOther := fr.Terms.termFacets[size:] for _, mto := range moveToOther { fr.Other += mto.Count } - fr.Terms = fr.Terms[0:size] + fr.Terms.termFacets = fr.Terms.termFacets[0:size] } } else if fr.NumericRanges != nil { sort.Sort(fr.NumericRanges) diff --git a/vendor/github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple/simple.go b/vendor/github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple/simple.go index 348dc1c..34e5c95 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple/simple.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/highlight/fragmenter/simple/simple.go @@ -123,9 +123,15 @@ OUTER: // if there were no terms to highlight // produce a single fragment from the beginning start := 0 - end := start + s.fragmentSize - if end > len(orig) { - end = len(orig) + end := start + used := 0 + for end < len(orig) && used < s.fragmentSize { + r, size := utf8.DecodeRune(orig[end:]) + if r == utf8.RuneError { + break + } + end += size + used++ } rv = append(rv, &highlight.Fragment{Orig: orig, Start: start, End: end}) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/bool_field.go b/vendor/github.com/blevesearch/bleve/v2/search/query/bool_field.go index 0272a2f..5aa7bb8 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/bool_field.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/bool_field.go @@ -15,6 +15,8 @@ package query import ( + "context" + "github.com/blevesearch/bleve/v2/mapping" "github.com/blevesearch/bleve/v2/search" "github.com/blevesearch/bleve/v2/search/searcher" @@ -51,7 +53,7 @@ func (q *BoolFieldQuery) Field() string { return q.FieldVal } -func (q *BoolFieldQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) 
(search.Searcher, error) { +func (q *BoolFieldQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() @@ -60,5 +62,5 @@ func (q *BoolFieldQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, o if q.Bool { term = "T" } - return searcher.NewTermSearcher(i, term, field, q.BoostVal.Value(), options) + return searcher.NewTermSearcher(ctx, i, term, field, q.BoostVal.Value(), options) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/boolean.go b/vendor/github.com/blevesearch/bleve/v2/search/query/boolean.go index b9c504f..b5e1fdc 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/boolean.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/boolean.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" @@ -113,11 +114,11 @@ func (q *BooleanQuery) Boost() float64 { return q.BoostVal.Value() } -func (q *BooleanQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *BooleanQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { var err error var mustNotSearcher search.Searcher if q.MustNot != nil { - mustNotSearcher, err = q.MustNot.Searcher(i, m, options) + mustNotSearcher, err = q.MustNot.Searcher(ctx, i, m, options) if err != nil { return nil, err } @@ -129,7 +130,7 @@ func (q *BooleanQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, opt var mustSearcher search.Searcher if q.Must != nil { - mustSearcher, err = q.Must.Searcher(i, m, options) + mustSearcher, err = q.Must.Searcher(ctx, i, m, options) if err != nil { return nil, err } @@ -141,7 +142,7 @@ func (q *BooleanQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, opt var shouldSearcher search.Searcher if q.Should != nil { - shouldSearcher, err = q.Should.Searcher(i, m, options) + shouldSearcher, err = q.Should.Searcher(ctx, i, m, options) if err != nil { return nil, err } @@ -158,7 +159,7 @@ func (q *BooleanQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, opt // if only mustNotSearcher, start with MatchAll if mustSearcher == nil && shouldSearcher == nil && mustNotSearcher != nil { - mustSearcher, err = searcher.NewMatchAllSearcher(i, 1.0, options) + mustSearcher, err = searcher.NewMatchAllSearcher(ctx, i, 1.0, options) if err != nil { return nil, err } @@ -169,7 +170,7 @@ func (q *BooleanQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, opt return shouldSearcher, nil } - return searcher.NewBooleanSearcher(i, mustSearcher, shouldSearcher, mustNotSearcher, options) + return searcher.NewBooleanSearcher(ctx, i, mustSearcher, shouldSearcher, mustNotSearcher, options) } func (q *BooleanQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/conjunction.go b/vendor/github.com/blevesearch/bleve/v2/search/query/conjunction.go index 7d64764..27bec7d 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/conjunction.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/conjunction.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "github.com/blevesearch/bleve/v2/mapping" @@ -52,10 +53,10 @@ func (q *ConjunctionQuery) AddQuery(aq ...Query) { } } -func (q *ConjunctionQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) 
(search.Searcher, error) { +func (q *ConjunctionQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { ss := make([]search.Searcher, 0, len(q.Conjuncts)) for _, conjunct := range q.Conjuncts { - sr, err := conjunct.Searcher(i, m, options) + sr, err := conjunct.Searcher(ctx, i, m, options) if err != nil { for _, searcher := range ss { if searcher != nil { @@ -75,7 +76,7 @@ func (q *ConjunctionQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, return searcher.NewMatchNoneSearcher(i) } - return searcher.NewConjunctionSearcher(i, ss, options) + return searcher.NewConjunctionSearcher(ctx, i, ss, options) } func (q *ConjunctionQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/date_range.go b/vendor/github.com/blevesearch/bleve/v2/search/query/date_range.go index 290786d..ef18f2f 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/date_range.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/date_range.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" "math" @@ -133,7 +134,7 @@ func (q *DateRangeQuery) Field() string { return q.FieldVal } -func (q *DateRangeQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *DateRangeQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { min, max, err := q.parseEndpoints() if err != nil { return nil, err @@ -144,7 +145,7 @@ func (q *DateRangeQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, o field = m.DefaultSearchField() } - return searcher.NewNumericRangeSearcher(i, min, max, q.InclusiveStart, q.InclusiveEnd, field, q.BoostVal.Value(), options) + return searcher.NewNumericRangeSearcher(ctx, i, min, max, q.InclusiveStart, q.InclusiveEnd, field, q.BoostVal.Value(), options) } func (q *DateRangeQuery) parseEndpoints() (*float64, *float64, error) { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/disjunction.go b/vendor/github.com/blevesearch/bleve/v2/search/query/disjunction.go index 50957fa..c6cc0d7 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/disjunction.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/disjunction.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" @@ -58,11 +59,11 @@ func (q *DisjunctionQuery) SetMin(m float64) { q.Min = m } -func (q *DisjunctionQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, +func (q *DisjunctionQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { ss := make([]search.Searcher, 0, len(q.Disjuncts)) for _, disjunct := range q.Disjuncts { - sr, err := disjunct.Searcher(i, m, options) + sr, err := disjunct.Searcher(ctx, i, m, options) if err != nil { for _, searcher := range ss { if searcher != nil { @@ -82,7 +83,7 @@ func (q *DisjunctionQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, return searcher.NewMatchNoneSearcher(i) } - return searcher.NewDisjunctionSearcher(i, ss, q.Min, options) + return searcher.NewDisjunctionSearcher(ctx, i, ss, q.Min, options) } func (q *DisjunctionQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/docid.go b/vendor/github.com/blevesearch/bleve/v2/search/query/docid.go index 1d27339..7116f39 100644 --- 
a/vendor/github.com/blevesearch/bleve/v2/search/query/docid.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/docid.go @@ -15,6 +15,8 @@ package query import ( + "context" + "github.com/blevesearch/bleve/v2/mapping" "github.com/blevesearch/bleve/v2/search" "github.com/blevesearch/bleve/v2/search/searcher" @@ -44,6 +46,6 @@ func (q *DocIDQuery) Boost() float64 { return q.BoostVal.Value() } -func (q *DocIDQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - return searcher.NewDocIDSearcher(i, q.IDs, q.BoostVal.Value(), options) +func (q *DocIDQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { + return searcher.NewDocIDSearcher(ctx, i, q.IDs, q.BoostVal.Value(), options) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/fuzzy.go b/vendor/github.com/blevesearch/bleve/v2/search/query/fuzzy.go index aceaa80..f24eb0c 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/fuzzy.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/fuzzy.go @@ -15,6 +15,8 @@ package query import ( + "context" + "github.com/blevesearch/bleve/v2/mapping" "github.com/blevesearch/bleve/v2/search" "github.com/blevesearch/bleve/v2/search/searcher" @@ -68,10 +70,10 @@ func (q *FuzzyQuery) SetPrefix(p int) { q.Prefix = p } -func (q *FuzzyQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *FuzzyQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() } - return searcher.NewFuzzySearcher(i, q.Term, q.Prefix, q.Fuzziness, field, q.BoostVal.Value(), options) + return searcher.NewFuzzySearcher(ctx, i, q.Term, q.Prefix, q.Fuzziness, field, q.BoostVal.Value(), options) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingbox.go b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingbox.go index be4b5a8..ac91253 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingbox.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingbox.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" @@ -56,7 +57,7 @@ func (q *GeoBoundingBoxQuery) Field() string { return q.FieldVal } -func (q *GeoBoundingBoxQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *GeoBoundingBoxQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() @@ -65,20 +66,20 @@ func (q *GeoBoundingBoxQuery) Searcher(i index.IndexReader, m mapping.IndexMappi if q.BottomRight[0] < q.TopLeft[0] { // cross date line, rewrite as two parts - leftSearcher, err := searcher.NewGeoBoundingBoxSearcher(i, -180, q.BottomRight[1], q.BottomRight[0], q.TopLeft[1], field, q.BoostVal.Value(), options, true) + leftSearcher, err := searcher.NewGeoBoundingBoxSearcher(ctx, i, -180, q.BottomRight[1], q.BottomRight[0], q.TopLeft[1], field, q.BoostVal.Value(), options, true) if err != nil { return nil, err } - rightSearcher, err := searcher.NewGeoBoundingBoxSearcher(i, q.TopLeft[0], q.BottomRight[1], 180, q.TopLeft[1], field, q.BoostVal.Value(), 
options, true) + rightSearcher, err := searcher.NewGeoBoundingBoxSearcher(ctx, i, q.TopLeft[0], q.BottomRight[1], 180, q.TopLeft[1], field, q.BoostVal.Value(), options, true) if err != nil { _ = leftSearcher.Close() return nil, err } - return searcher.NewDisjunctionSearcher(i, []search.Searcher{leftSearcher, rightSearcher}, 0, options) + return searcher.NewDisjunctionSearcher(ctx, i, []search.Searcher{leftSearcher, rightSearcher}, 0, options) } - return searcher.NewGeoBoundingBoxSearcher(i, q.TopLeft[0], q.BottomRight[1], q.BottomRight[0], q.TopLeft[1], field, q.BoostVal.Value(), options, true) + return searcher.NewGeoBoundingBoxSearcher(ctx, i, q.TopLeft[0], q.BottomRight[1], q.BottomRight[0], q.TopLeft[1], field, q.BoostVal.Value(), options, true) } func (q *GeoBoundingBoxQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingpolygon.go b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingpolygon.go index abb8ccd..467f39b 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingpolygon.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_boundingpolygon.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" @@ -53,14 +54,14 @@ func (q *GeoBoundingPolygonQuery) Field() string { return q.FieldVal } -func (q *GeoBoundingPolygonQuery) Searcher(i index.IndexReader, +func (q *GeoBoundingPolygonQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() } - return searcher.NewGeoBoundedPolygonSearcher(i, q.Points, field, q.BoostVal.Value(), options) + return searcher.NewGeoBoundedPolygonSearcher(ctx, i, q.Points, field, q.BoostVal.Value(), options) } func (q *GeoBoundingPolygonQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_distance.go b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_distance.go index d5174c2..f05bf67 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_distance.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_distance.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" @@ -56,7 +57,7 @@ func (q *GeoDistanceQuery) Field() string { return q.FieldVal } -func (q *GeoDistanceQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, +func (q *GeoDistanceQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { @@ -68,7 +69,7 @@ func (q *GeoDistanceQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, return nil, err } - return searcher.NewGeoPointDistanceSearcher(i, q.Location[0], q.Location[1], + return searcher.NewGeoPointDistanceSearcher(ctx, i, q.Location[0], q.Location[1], dist, field, q.BoostVal.Value(), options) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/geo_shape.go b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_shape.go new file mode 100644 index 0000000..a63ec80 --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/geo_shape.go @@ -0,0 +1,135 @@ +// Copyright (c) 2022 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package query + +import ( + "context" + "encoding/json" + + "github.com/blevesearch/bleve/v2/geo" + "github.com/blevesearch/bleve/v2/mapping" + "github.com/blevesearch/bleve/v2/search" + "github.com/blevesearch/bleve/v2/search/searcher" + index "github.com/blevesearch/bleve_index_api" +) + +type Geometry struct { + Shape index.GeoJSON `json:"shape"` + Relation string `json:"relation"` +} + +type GeoShapeQuery struct { + Geometry Geometry `json:"geometry"` + FieldVal string `json:"field,omitempty"` + BoostVal *Boost `json:"boost,omitempty"` +} + +// NewGeoShapeQuery creates a geoshape query for the +// given shape type. This method can be used for +// creating geoshape queries for shape types like: point, +// linestring, polygon, multipoint, multilinestring, +// multipolygon and envelope. +func NewGeoShapeQuery(coordinates [][][][]float64, typ, + relation string) (*GeoShapeQuery, error) { + s, _, err := geo.NewGeoJsonShape(coordinates, typ) + if err != nil { + return nil, err + } + + return &GeoShapeQuery{Geometry: Geometry{Shape: s, + Relation: relation}}, nil +} + +// NewGeoShapeCircleQuery creates a geoshape query for the +// given center point and the radius. Radius formats supported: +// "5in" "5inch" "7yd" "7yards" "9ft" "9feet" "11km" "11kilometers" +// "3nm" "3nauticalmiles" "13mm" "13millimeters" "15cm" "15centimeters" +// "17mi" "17miles" "19m" "19meters" If the unit cannot be determined, +// the entire string is parsed and the unit of meters is assumed. +func NewGeoShapeCircleQuery(coordinates []float64, radius, + relation string) (*GeoShapeQuery, error) { + + s, _, err := geo.NewGeoCircleShape(coordinates, radius) + if err != nil { + return nil, err + } + + return &GeoShapeQuery{Geometry: Geometry{Shape: s, + Relation: relation}}, nil +} + +// NewGeometryCollectionQuery creates a geoshape query for the +// given geometrycollection coordinates and types. 
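+//
+// A minimal, illustrative sketch (the coordinates below are examples,
+// not fixed API values; points follow GeoJSON's [lon, lat] ordering and
+// the single polygon ring is closed by repeating its first point):
+//
+//	coords := [][][][][]float64{{{{{-122.4, 37.7}, {-122.3, 37.7}, {-122.3, 37.8}, {-122.4, 37.7}}}}}
+//	q, err := NewGeometryCollectionQuery(coords, []string{"polygon"}, "intersects")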
+func NewGeometryCollectionQuery(coordinates [][][][][]float64, types []string, + relation string) (*GeoShapeQuery, error) { + s, _, err := geo.NewGeometryCollection(coordinates, types) + if err != nil { + return nil, err + } + + return &GeoShapeQuery{Geometry: Geometry{Shape: s, + Relation: relation}}, nil +} + +func (q *GeoShapeQuery) SetBoost(b float64) { + boost := Boost(b) + q.BoostVal = &boost +} + +func (q *GeoShapeQuery) Boost() float64 { + return q.BoostVal.Value() +} + +func (q *GeoShapeQuery) SetField(f string) { + q.FieldVal = f +} + +func (q *GeoShapeQuery) Field() string { + return q.FieldVal +} + +func (q *GeoShapeQuery) Searcher(ctx context.Context, i index.IndexReader, + m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { + field := q.FieldVal + if q.FieldVal == "" { + field = m.DefaultSearchField() + } + + return searcher.NewGeoShapeSearcher(ctx, i, q.Geometry.Shape, q.Geometry.Relation, field, + q.BoostVal.Value(), options) +} + +func (q *GeoShapeQuery) Validate() error { + return nil +} + +func (q *Geometry) UnmarshalJSON(data []byte) error { + tmp := struct { + Shape json.RawMessage `json:"shape"` + Relation string `json:"relation"` + }{} + + err := json.Unmarshal(data, &tmp) + if err != nil { + return err + } + + q.Shape, err = geo.ParseGeoJSONShape(tmp.Shape) + if err != nil { + return err + } + q.Relation = tmp.Relation + return nil +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/ip_range.go b/vendor/github.com/blevesearch/bleve/v2/search/query/ip_range.go new file mode 100644 index 0000000..68577cc --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/ip_range.go @@ -0,0 +1,85 @@ +// Copyright (c) 2021 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package query + +import ( + "context" + "fmt" + "net" + + "github.com/blevesearch/bleve/v2/mapping" + "github.com/blevesearch/bleve/v2/search" + "github.com/blevesearch/bleve/v2/search/searcher" + index "github.com/blevesearch/bleve_index_api" +) + +type IPRangeQuery struct { + CIDR string `json:"cidr,omitempty"` + FieldVal string `json:"field,omitempty"` + BoostVal *Boost `json:"boost,omitempty"` +} + +func NewIPRangeQuery(cidr string) *IPRangeQuery { + return &IPRangeQuery{ + CIDR: cidr, + } +} + +func (q *IPRangeQuery) SetBoost(b float64) { + boost := Boost(b) + q.BoostVal = &boost +} + +func (q *IPRangeQuery) Boost() float64 { + return q.BoostVal.Value() +} + +func (q *IPRangeQuery) SetField(f string) { + q.FieldVal = f +} + +func (q *IPRangeQuery) Field() string { + return q.FieldVal +} + +func (q *IPRangeQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { + field := q.FieldVal + if q.FieldVal == "" { + field = m.DefaultSearchField() + } + _, ipNet, err := net.ParseCIDR(q.CIDR) + if err != nil { + ip := net.ParseIP(q.CIDR) + if ip == nil { + return nil, err + } + // If we are searching for a specific IP rather than members of a network, just use a term search. + return searcher.NewTermSearcherBytes(ctx, i, ip.To16(), field, q.BoostVal.Value(), options) + } + return searcher.NewIPRangeSearcher(ctx, i, ipNet, field, q.BoostVal.Value(), options) +} + +func (q *IPRangeQuery) Validate() error { + _, _, err := net.ParseCIDR(q.CIDR) + if err == nil { + return nil + } + // We also allow search for a specific IP. + ip := net.ParseIP(q.CIDR) + if ip != nil { + return nil // we have a valid IP + } + return fmt.Errorf("IPRangeQuery must be for a network or IP address, %q", q.CIDR) +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/match.go b/vendor/github.com/blevesearch/bleve/v2/search/query/match.go index da1dc09..61c00a0 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/match.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/match.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" @@ -114,7 +115,7 @@ func (q *MatchQuery) SetOperator(operator MatchQueryOperator) { q.Operator = operator } -func (q *MatchQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *MatchQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { @@ -160,17 +161,17 @@ func (q *MatchQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, optio shouldQuery := NewDisjunctionQuery(tqs) shouldQuery.SetMin(1) shouldQuery.SetBoost(q.BoostVal.Value()) - return shouldQuery.Searcher(i, m, options) + return shouldQuery.Searcher(ctx, i, m, options) case MatchQueryOperatorAnd: mustQuery := NewConjunctionQuery(tqs) mustQuery.SetBoost(q.BoostVal.Value()) - return mustQuery.Searcher(i, m, options) + return mustQuery.Searcher(ctx, i, m, options) default: return nil, fmt.Errorf("unhandled operator %d", q.Operator) } } noneQuery := NewMatchNoneQuery() - return noneQuery.Searcher(i, m, options) + return noneQuery.Searcher(ctx, i, m, options) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/match_all.go b/vendor/github.com/blevesearch/bleve/v2/search/query/match_all.go index a31f25a..e88825a 100644 ---
a/vendor/github.com/blevesearch/bleve/v2/search/query/match_all.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/match_all.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "github.com/blevesearch/bleve/v2/mapping" @@ -42,8 +43,8 @@ func (q *MatchAllQuery) Boost() float64 { return q.BoostVal.Value() } -func (q *MatchAllQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - return searcher.NewMatchAllSearcher(i, q.BoostVal.Value(), options) +func (q *MatchAllQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { + return searcher.NewMatchAllSearcher(ctx, i, q.BoostVal.Value(), options) } func (q *MatchAllQuery) MarshalJSON() ([]byte, error) { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/match_none.go b/vendor/github.com/blevesearch/bleve/v2/search/query/match_none.go index 69b4418..cb65a72 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/match_none.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/match_none.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "github.com/blevesearch/bleve/v2/mapping" @@ -42,7 +43,7 @@ func (q *MatchNoneQuery) Boost() float64 { return q.BoostVal.Value() } -func (q *MatchNoneQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *MatchNoneQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { return searcher.NewMatchNoneSearcher(i) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/match_phrase.go b/vendor/github.com/blevesearch/bleve/v2/search/query/match_phrase.go index 057245f..fa8ac72 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/match_phrase.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/match_phrase.go @@ -15,6 +15,7 @@ package query import ( + "context" "fmt" "github.com/blevesearch/bleve/v2/analysis" @@ -61,7 +62,7 @@ func (q *MatchPhraseQuery) Field() string { return q.FieldVal } -func (q *MatchPhraseQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *MatchPhraseQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() @@ -83,10 +84,10 @@ func (q *MatchPhraseQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, phrase := tokenStreamToPhrase(tokens) phraseQuery := NewMultiPhraseQuery(phrase, field) phraseQuery.SetBoost(q.BoostVal.Value()) - return phraseQuery.Searcher(i, m, options) + return phraseQuery.Searcher(ctx, i, m, options) } noneQuery := NewMatchNoneQuery() - return noneQuery.Searcher(i, m, options) + return noneQuery.Searcher(ctx, i, m, options) } func tokenStreamToPhrase(tokens analysis.TokenStream) [][]string { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/multi_phrase.go b/vendor/github.com/blevesearch/bleve/v2/search/query/multi_phrase.go index d75dc0c..2887be1 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/multi_phrase.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/multi_phrase.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" @@ -55,8 +56,8 @@ func (q 
*MultiPhraseQuery) Boost() float64 { return q.BoostVal.Value() } -func (q *MultiPhraseQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - return searcher.NewMultiPhraseSearcher(i, q.Terms, q.Field, options) +func (q *MultiPhraseQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { + return searcher.NewMultiPhraseSearcher(ctx, i, q.Terms, q.Field, options) } func (q *MultiPhraseQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/numeric_range.go b/vendor/github.com/blevesearch/bleve/v2/search/query/numeric_range.go index a1fe7b6..ad24741 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/numeric_range.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/numeric_range.go @@ -15,6 +15,7 @@ package query import ( + "context" "fmt" "github.com/blevesearch/bleve/v2/mapping" @@ -71,12 +72,12 @@ func (q *NumericRangeQuery) Field() string { return q.FieldVal } -func (q *NumericRangeQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *NumericRangeQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() } - return searcher.NewNumericRangeSearcher(i, q.Min, q.Max, q.InclusiveMin, q.InclusiveMax, field, q.BoostVal.Value(), options) + return searcher.NewNumericRangeSearcher(ctx, i, q.Min, q.Max, q.InclusiveMin, q.InclusiveMax, field, q.BoostVal.Value(), options) } func (q *NumericRangeQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/phrase.go b/vendor/github.com/blevesearch/bleve/v2/search/query/phrase.go index d6da118..207e66b 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/phrase.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/phrase.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" @@ -52,8 +53,8 @@ func (q *PhraseQuery) Boost() float64 { return q.BoostVal.Value() } -func (q *PhraseQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { - return searcher.NewPhraseSearcher(i, q.Terms, q.Field, options) +func (q *PhraseQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { + return searcher.NewPhraseSearcher(ctx, i, q.Terms, q.Field, options) } func (q *PhraseQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/prefix.go b/vendor/github.com/blevesearch/bleve/v2/search/query/prefix.go index 05dc40c..debbbc1 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/prefix.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/prefix.go @@ -15,6 +15,8 @@ package query import ( + "context" + "github.com/blevesearch/bleve/v2/mapping" "github.com/blevesearch/bleve/v2/search" "github.com/blevesearch/bleve/v2/search/searcher" @@ -53,10 +55,10 @@ func (q *PrefixQuery) Field() string { return q.FieldVal } -func (q *PrefixQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *PrefixQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) 
(search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() } - return searcher.NewTermPrefixSearcher(i, q.Prefix, field, q.BoostVal.Value(), options) + return searcher.NewTermPrefixSearcher(ctx, i, q.Prefix, field, q.BoostVal.Value(), options) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/query.go b/vendor/github.com/blevesearch/bleve/v2/search/query/query.go index 7f2781c..df56053 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/query.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/query.go @@ -15,6 +15,7 @@ package query import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -36,7 +37,7 @@ func SetLog(l *log.Logger) { // A Query represents a description of the type // and parameters for a query into the index. type Query interface { - Searcher(i index.IndexReader, m mapping.IndexMapping, + Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) } @@ -282,6 +283,27 @@ func ParseQuery(input []byte) (Query, error) { } return &rv, nil } + + _, hasGeo := tmp["geometry"] + if hasGeo { + var rv GeoShapeQuery + err := json.Unmarshal(input, &rv) + if err != nil { + return nil, err + } + return &rv, nil + } + + _, hasCIDR := tmp["cidr"] + if hasCIDR { + var rv IPRangeQuery + err := json.Unmarshal(input, &rv) + if err != nil { + return nil, err + } + return &rv, nil + } + return nil, fmt.Errorf("unknown query type") } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.go b/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.go index fe1680c..42bb598 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/query_string.go @@ -15,6 +15,8 @@ package query import ( + "context" + "github.com/blevesearch/bleve/v2/mapping" "github.com/blevesearch/bleve/v2/search" index "github.com/blevesearch/bleve_index_api" @@ -47,12 +49,12 @@ func (q *QueryStringQuery) Parse() (Query, error) { return parseQuerySyntax(q.Query) } -func (q *QueryStringQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *QueryStringQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { newQuery, err := parseQuerySyntax(q.Query) if err != nil { return nil, err } - return newQuery.Searcher(i, m, options) + return newQuery.Searcher(ctx, i, m, options) } func (q *QueryStringQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string_lex.go b/vendor/github.com/blevesearch/bleve/v2/search/query/query_string_lex.go index 3a9cf23..c01fa6f 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/query_string_lex.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/query_string_lex.go @@ -248,8 +248,8 @@ func inTildeState(l *queryStringLex, next rune, eof bool) (lexState, bool) { } func inNumOrStrState(l *queryStringLex, next rune, eof bool) (lexState, bool) { - // only a non-escaped space ends the tilde (or eof) - if eof || (!l.inEscape && next == ' ') { + // end on non-escaped space, colon, tilde, boost (or eof) + if eof || (!l.inEscape && (next == ' ' || next == ':' || next == '^' || next == '~')) { // end number l.nextTokenType = tNUMBER l.nextToken = &yySymType{ @@ -257,7 +257,13 @@ func inNumOrStrState(l *queryStringLex, next rune, eof 
bool) (lexState, bool) { } logDebugTokens("NUMBER - '%s'", l.nextToken.s) l.reset() - return startState, true + + consumed := true + if !eof && (next == ':' || next == '^' || next == '~') { + consumed = false + } + + return startState, consumed } else if !l.inEscape && next == '\\' { l.inEscape = true return inNumOrStrState, true @@ -287,7 +293,7 @@ func inNumOrStrState(l *queryStringLex, next rune, eof bool) (lexState, bool) { } func inStrState(l *queryStringLex, next rune, eof bool) (lexState, bool) { - // end on non-escped space, colon, tilde, boost (or eof) + // end on non-escaped space, colon, tilde, boost (or eof) if eof || (!l.inEscape && (next == ' ' || next == ':' || next == '^' || next == '~')) { // end string l.nextTokenType = tSTRING diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/regexp.go b/vendor/github.com/blevesearch/bleve/v2/search/query/regexp.go index ba744ec..6b3da95 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/regexp.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/regexp.go @@ -15,6 +15,7 @@ package query import ( + "context" "strings" "github.com/blevesearch/bleve/v2/mapping" @@ -57,7 +58,7 @@ func (q *RegexpQuery) Field() string { return q.FieldVal } -func (q *RegexpQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *RegexpQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() @@ -72,7 +73,7 @@ func (q *RegexpQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, opti actualRegexp = actualRegexp[1:] // remove leading ^ } - return searcher.NewRegexpStringSearcher(i, actualRegexp, field, + return searcher.NewRegexpStringSearcher(ctx, i, actualRegexp, field, q.BoostVal.Value(), options) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/term.go b/vendor/github.com/blevesearch/bleve/v2/search/query/term.go index 82958bb..5c6af39 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/term.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/term.go @@ -15,6 +15,8 @@ package query import ( + "context" + "github.com/blevesearch/bleve/v2/mapping" "github.com/blevesearch/bleve/v2/search" "github.com/blevesearch/bleve/v2/search/searcher" @@ -52,10 +54,10 @@ func (q *TermQuery) Field() string { return q.FieldVal } -func (q *TermQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *TermQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() } - return searcher.NewTermSearcher(i, q.Term, field, q.BoostVal.Value(), options) + return searcher.NewTermSearcher(ctx, i, q.Term, field, q.BoostVal.Value(), options) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/term_range.go b/vendor/github.com/blevesearch/bleve/v2/search/query/term_range.go index 3edfa69..4dc3a34 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/term_range.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/term_range.go @@ -15,6 +15,7 @@ package query import ( + "context" "fmt" "github.com/blevesearch/bleve/v2/mapping" @@ -71,7 +72,7 @@ func (q *TermRangeQuery) Field() string { return q.FieldVal } -func (q *TermRangeQuery) 
Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *TermRangeQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() @@ -84,7 +85,7 @@ func (q *TermRangeQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, o if q.Max != "" { maxTerm = []byte(q.Max) } - return searcher.NewTermRangeSearcher(i, minTerm, maxTerm, q.InclusiveMin, q.InclusiveMax, field, q.BoostVal.Value(), options) + return searcher.NewTermRangeSearcher(ctx, i, minTerm, maxTerm, q.InclusiveMin, q.InclusiveMax, field, q.BoostVal.Value(), options) } func (q *TermRangeQuery) Validate() error { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/query/wildcard.go b/vendor/github.com/blevesearch/bleve/v2/search/query/wildcard.go index 7713a9a..f04f3f2 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/query/wildcard.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/query/wildcard.go @@ -15,6 +15,7 @@ package query import ( + "context" "strings" "github.com/blevesearch/bleve/v2/mapping" @@ -76,7 +77,7 @@ func (q *WildcardQuery) Field() string { return q.FieldVal } -func (q *WildcardQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { +func (q *WildcardQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { field := q.FieldVal if q.FieldVal == "" { field = m.DefaultSearchField() @@ -84,7 +85,7 @@ func (q *WildcardQuery) Searcher(i index.IndexReader, m mapping.IndexMapping, op regexpString := wildcardRegexpReplacer.Replace(q.Wildcard) - return searcher.NewRegexpStringSearcher(i, regexpString, field, + return searcher.NewRegexpStringSearcher(ctx, i, regexpString, field, q.BoostVal.Value(), options) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_term.go b/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_term.go index ca26864..7b60eda 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_term.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/scorer/scorer_term.go @@ -198,6 +198,5 @@ func (s *TermQueryScorer) Score(ctx *search.SearchContext, termMatch *index.Term }) } } - return rv } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/search.go b/vendor/github.com/blevesearch/bleve/v2/search/search.go index 9277d52..69d8945 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/search.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/search.go @@ -27,6 +27,10 @@ var reflectStaticSizeDocumentMatch int var reflectStaticSizeSearchContext int var reflectStaticSizeLocation int +const SearchIOStatsCallbackKey = "_search_io_stats_callback_key" + +type SearchIOStatsCallbackFunc func(uint64) + func init() { var dm DocumentMatch reflectStaticSizeDocumentMatch = int(reflect.TypeOf(dm).Size()) @@ -270,7 +274,7 @@ func (dm *DocumentMatch) Complete(prealloc []Location) []Location { var needsDedupe bool for i, ftl := range dm.FieldTermLocations { - if lastField != ftl.Field { + if i == 0 || lastField != ftl.Field { lastField = ftl.Field if dm.Locations == nil { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_boolean.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_boolean.go index ef9093c..bf207f8 100644 --- 
a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_boolean.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_boolean.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "math" "reflect" @@ -48,7 +49,7 @@ type BooleanSearcher struct { done bool } -func NewBooleanSearcher(indexReader index.IndexReader, mustSearcher search.Searcher, shouldSearcher search.Searcher, mustNotSearcher search.Searcher, options search.SearcherOptions) (*BooleanSearcher, error) { +func NewBooleanSearcher(ctx context.Context, indexReader index.IndexReader, mustSearcher search.Searcher, shouldSearcher search.Searcher, mustNotSearcher search.Searcher, options search.SearcherOptions) (*BooleanSearcher, error) { // build our searcher rv := BooleanSearcher{ indexReader: indexReader, diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_conjunction.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_conjunction.go index 5fe59b9..19ef199 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_conjunction.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_conjunction.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "math" "reflect" "sort" @@ -41,9 +42,10 @@ type ConjunctionSearcher struct { scorer *scorer.ConjunctionQueryScorer initialized bool options search.SearcherOptions + bytesRead uint64 } -func NewConjunctionSearcher(indexReader index.IndexReader, +func NewConjunctionSearcher(ctx context.Context, indexReader index.IndexReader, qsearchers []search.Searcher, options search.SearcherOptions) ( search.Searcher, error) { // build the sorted downstream searchers @@ -57,7 +59,7 @@ func NewConjunctionSearcher(indexReader index.IndexReader, // do not need extra information like freq-norm's or term vectors if len(searchers) > 1 && options.Score == "none" && !options.IncludeTermVectors { - rv, err := optimizeCompositeSearcher("conjunction:unadorned", + rv, err := optimizeCompositeSearcher(ctx, "conjunction:unadorned", indexReader, searchers, options) if err != nil || rv != nil { return rv, err @@ -76,7 +78,7 @@ func NewConjunctionSearcher(indexReader index.IndexReader, // attempt push-down conjunction optimization when there's >1 searchers if len(searchers) > 1 { - rv, err := optimizeCompositeSearcher("conjunction", + rv, err := optimizeCompositeSearcher(ctx, "conjunction", indexReader, searchers, options) if err != nil || rv != nil { return rv, err diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction.go index 4cee468..606a157 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction.go @@ -15,7 +15,9 @@ package searcher import ( + "context" "fmt" + "github.com/blevesearch/bleve/v2/search" index "github.com/blevesearch/bleve_index_api" ) @@ -30,10 +32,10 @@ var DisjunctionMaxClauseCount = 0 // slice implementation to a heap implementation. 
var DisjunctionHeapTakeover = 10 -func NewDisjunctionSearcher(indexReader index.IndexReader, +func NewDisjunctionSearcher(ctx context.Context, indexReader index.IndexReader, qsearchers []search.Searcher, min float64, options search.SearcherOptions) ( search.Searcher, error) { - return newDisjunctionSearcher(indexReader, qsearchers, min, options, true) + return newDisjunctionSearcher(ctx, indexReader, qsearchers, min, options, true) } func optionsDisjunctionOptimizable(options search.SearcherOptions) bool { @@ -41,7 +43,7 @@ func optionsDisjunctionOptimizable(options search.SearcherOptions) bool { return rv } -func newDisjunctionSearcher(indexReader index.IndexReader, +func newDisjunctionSearcher(ctx context.Context, indexReader index.IndexReader, qsearchers []search.Searcher, min float64, options search.SearcherOptions, limit bool) (search.Searcher, error) { // attempt the "unadorned" disjunction optimization only when we @@ -49,7 +51,7 @@ func newDisjunctionSearcher(indexReader index.IndexReader, // and the requested min is simple if len(qsearchers) > 1 && min <= 1 && optionsDisjunctionOptimizable(options) { - rv, err := optimizeCompositeSearcher("disjunction:unadorned", + rv, err := optimizeCompositeSearcher(ctx, "disjunction:unadorned", indexReader, qsearchers, options) if err != nil || rv != nil { return rv, err @@ -57,14 +59,14 @@ func newDisjunctionSearcher(indexReader index.IndexReader, } if len(qsearchers) > DisjunctionHeapTakeover { - return newDisjunctionHeapSearcher(indexReader, qsearchers, min, options, + return newDisjunctionHeapSearcher(ctx, indexReader, qsearchers, min, options, limit) } - return newDisjunctionSliceSearcher(indexReader, qsearchers, min, options, + return newDisjunctionSliceSearcher(ctx, indexReader, qsearchers, min, options, limit) } -func optimizeCompositeSearcher(optimizationKind string, +func optimizeCompositeSearcher(ctx context.Context, optimizationKind string, indexReader index.IndexReader, qsearchers []search.Searcher, options search.SearcherOptions) (search.Searcher, error) { var octx index.OptimizableContext diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_heap.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_heap.go index bf94597..0235838 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_heap.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_heap.go @@ -17,6 +17,7 @@ package searcher import ( "bytes" "container/heap" + "context" "math" "reflect" @@ -55,9 +56,11 @@ type DisjunctionHeapSearcher struct { matching []*search.DocumentMatch matchingCurrs []*SearcherCurr + + bytesRead uint64 } -func newDisjunctionHeapSearcher(indexReader index.IndexReader, +func newDisjunctionHeapSearcher(ctx context.Context, indexReader index.IndexReader, searchers []search.Searcher, min float64, options search.SearcherOptions, limit bool) ( *DisjunctionHeapSearcher, error) { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_slice.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_slice.go index 79fee9f..6958cf4 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_slice.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_disjunction_slice.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "math" "reflect" "sort" @@ -43,9 +44,10 @@ type DisjunctionSliceSearcher struct { matching []*search.DocumentMatch 
matchingIdxs []int initialized bool + bytesRead uint64 } -func newDisjunctionSliceSearcher(indexReader index.IndexReader, +func newDisjunctionSliceSearcher(ctx context.Context, indexReader index.IndexReader, qsearchers []search.Searcher, min float64, options search.SearcherOptions, limit bool) ( *DisjunctionSliceSearcher, error) { @@ -156,7 +158,6 @@ func (s *DisjunctionSliceSearcher) updateMatches() error { matchingIdxs = matchingIdxs[:0] } } - matching = append(matching, curr) matchingIdxs = append(matchingIdxs, i) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_docid.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_docid.go index 2d90ae1..720fd32 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_docid.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_docid.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "reflect" "github.com/blevesearch/bleve/v2/search" @@ -37,7 +38,7 @@ type DocIDSearcher struct { count int } -func NewDocIDSearcher(indexReader index.IndexReader, ids []string, boost float64, +func NewDocIDSearcher(ctx context.Context, indexReader index.IndexReader, ids []string, boost float64, options search.SearcherOptions) (searcher *DocIDSearcher, err error) { reader, err := indexReader.DocIDReaderOnly(ids) diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_filter.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_filter.go index 9cab0f7..4e4dd5e 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_filter.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_filter.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "reflect" "github.com/blevesearch/bleve/v2/search" @@ -41,7 +42,7 @@ type FilteringSearcher struct { accept FilterFunc } -func NewFilteringSearcher(s search.Searcher, filter FilterFunc) *FilteringSearcher { +func NewFilteringSearcher(ctx context.Context, s search.Searcher, filter FilterFunc) *FilteringSearcher { return &FilteringSearcher{ child: s, accept: filter, diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_fuzzy.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_fuzzy.go index aab6701..9423b61 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_fuzzy.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_fuzzy.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "fmt" "github.com/blevesearch/bleve/v2/search" @@ -23,7 +24,7 @@ import ( var MaxFuzziness = 2 -func NewFuzzySearcher(indexReader index.IndexReader, term string, +func NewFuzzySearcher(ctx context.Context, indexReader index.IndexReader, term string, prefix, fuzziness int, field string, boost float64, options search.SearcherOptions) (search.Searcher, error) { @@ -44,19 +45,47 @@ func NewFuzzySearcher(indexReader index.IndexReader, term string, break } } - candidateTerms, err := findFuzzyCandidateTerms(indexReader, term, fuzziness, + fuzzyCandidates, err := findFuzzyCandidateTerms(indexReader, term, fuzziness, field, prefixTerm) if err != nil { return nil, err } - return NewMultiTermSearcher(indexReader, candidateTerms, field, + var candidates []string + var dictBytesRead uint64 + if fuzzyCandidates != nil { + candidates = fuzzyCandidates.candidates + dictBytesRead = fuzzyCandidates.bytesRead + } + + if ctx != nil { + reportIOStats(dictBytesRead, ctx) + } + + return NewMultiTermSearcher(ctx, indexReader, candidates, field, boost, options, 
true) } +type fuzzyCandidates struct { + candidates []string + bytesRead uint64 +} + +func reportIOStats(bytesRead uint64, ctx context.Context) { + // The fuzzy, regexp like queries essentially load a dictionary, + // which potentially incurs a cost that must be accounted by + // using the callback to report the value. + statsCallbackFn := ctx.Value(search.SearchIOStatsCallbackKey) + if statsCallbackFn != nil { + statsCallbackFn.(search.SearchIOStatsCallbackFunc)(bytesRead) + } +} + func findFuzzyCandidateTerms(indexReader index.IndexReader, term string, - fuzziness int, field, prefixTerm string) (rv []string, err error) { - rv = make([]string, 0) + fuzziness int, field, prefixTerm string) (rv *fuzzyCandidates, err error) { + rv = &fuzzyCandidates{ + candidates: make([]string, 0), + } // in case of advanced reader implementations directly call // the levenshtein automaton based iterator to collect the @@ -73,12 +102,14 @@ func findFuzzyCandidateTerms(indexReader index.IndexReader, term string, }() tfd, err := fieldDict.Next() for err == nil && tfd != nil { - rv = append(rv, tfd.Term) - if tooManyClauses(len(rv)) { - return nil, tooManyClausesErr(field, len(rv)) + rv.candidates = append(rv.candidates, tfd.Term) + if tooManyClauses(len(rv.candidates)) { + return nil, tooManyClausesErr(field, len(rv.candidates)) } tfd, err = fieldDict.Next() } + + rv.bytesRead = fieldDict.BytesRead() return rv, err } @@ -105,13 +136,14 @@ func findFuzzyCandidateTerms(indexReader index.IndexReader, term string, var exceeded bool ld, exceeded, reuse = search.LevenshteinDistanceMaxReuseSlice(term, tfd.Term, fuzziness, reuse) if !exceeded && ld <= fuzziness { - rv = append(rv, tfd.Term) - if tooManyClauses(len(rv)) { - return nil, tooManyClausesErr(field, len(rv)) + rv.candidates = append(rv.candidates, tfd.Term) + if tooManyClauses(len(rv.candidates)) { + return nil, tooManyClausesErr(field, len(rv.candidates)) } } tfd, err = fieldDict.Next() } + rv.bytesRead = fieldDict.BytesRead() return rv, err } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoboundingbox.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoboundingbox.go index a231514..05ca1bf 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoboundingbox.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoboundingbox.go @@ -15,6 +15,8 @@ package searcher import ( + "context" + "github.com/blevesearch/bleve/v2/document" "github.com/blevesearch/bleve/v2/geo" "github.com/blevesearch/bleve/v2/numeric" @@ -27,10 +29,32 @@ type filterFunc func(key []byte) bool var GeoBitsShift1 = geo.GeoBits << 1 var GeoBitsShift1Minus1 = GeoBitsShift1 - 1 -func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, +func NewGeoBoundingBoxSearcher(ctx context.Context, indexReader index.IndexReader, minLon, minLat, maxLon, maxLat float64, field string, boost float64, options search.SearcherOptions, checkBoundaries bool) ( search.Searcher, error) { + if tp, ok := indexReader.(index.SpatialIndexPlugin); ok { + sp, err := tp.GetSpatialAnalyzerPlugin("s2") + if err == nil { + terms := sp.GetQueryTokens(geo.NewBoundedRectangle(minLat, + minLon, maxLat, maxLon)) + boxSearcher, err := NewMultiTermSearcher(ctx, indexReader, + terms, field, boost, options, false) + if err != nil { + return nil, err + } + + dvReader, err := indexReader.DocValueReader([]string{field}) + if err != nil { + return nil, err + } + + return NewFilteringSearcher(ctx, boxSearcher, 
buildRectFilter(dvReader, + field, minLon, minLat, maxLon, maxLat)), nil + } + } + + // indexes without the spatial plugin override would continue here. // track list of opened searchers, for cleanup on early exit var openedSearchers []search.Searcher @@ -41,7 +65,7 @@ func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, } // do math to produce list of terms needed for this search - onBoundaryTerms, notOnBoundaryTerms, err := ComputeGeoRange(0, GeoBitsShift1Minus1, + onBoundaryTerms, notOnBoundaryTerms, err := ComputeGeoRange(nil, 0, GeoBitsShift1Minus1, minLon, minLat, maxLon, maxLat, checkBoundaries, indexReader, field) if err != nil { return nil, err @@ -54,13 +78,13 @@ func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, } if len(onBoundaryTerms) > 0 { - rawOnBoundarySearcher, err := NewMultiTermSearcherBytes(indexReader, + rawOnBoundarySearcher, err := NewMultiTermSearcherBytes(ctx, indexReader, onBoundaryTerms, field, boost, options, false) if err != nil { return nil, err } // add filter to check points near the boundary - onBoundarySearcher = NewFilteringSearcher(rawOnBoundarySearcher, + onBoundarySearcher = NewFilteringSearcher(ctx, rawOnBoundarySearcher, buildRectFilter(dvReader, field, minLon, minLat, maxLon, maxLat)) openedSearchers = append(openedSearchers, onBoundarySearcher) } @@ -68,7 +92,7 @@ func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, var notOnBoundarySearcher search.Searcher if len(notOnBoundaryTerms) > 0 { var err error - notOnBoundarySearcher, err = NewMultiTermSearcherBytes(indexReader, + notOnBoundarySearcher, err = NewMultiTermSearcherBytes(ctx, indexReader, notOnBoundaryTerms, field, boost, options, false) if err != nil { cleanupOpenedSearchers() @@ -78,7 +102,7 @@ func NewGeoBoundingBoxSearcher(indexReader index.IndexReader, minLon, minLat, } if onBoundarySearcher != nil && notOnBoundarySearcher != nil { - rv, err := NewDisjunctionSearcher(indexReader, + rv, err := NewDisjunctionSearcher(ctx, indexReader, []search.Searcher{ onBoundarySearcher, notOnBoundarySearcher, @@ -103,12 +127,12 @@ var geoDetailLevel = ((geo.GeoBits << 1) - geoMaxShift) / 2 type closeFunc func() error -func ComputeGeoRange(term uint64, shift uint, +func ComputeGeoRange(ctx context.Context, term uint64, shift uint, sminLon, sminLat, smaxLon, smaxLat float64, checkBoundaries bool, indexReader index.IndexReader, field string) ( onBoundary [][]byte, notOnBoundary [][]byte, err error) { - isIndexed, closeF, err := buildIsIndexedFunc(indexReader, field) + isIndexed, closeF, err := buildIsIndexedFunc(ctx, indexReader, field) if closeF != nil { defer func() { cerr := closeF() @@ -134,7 +158,7 @@ func ComputeGeoRange(term uint64, shift uint, return grc.onBoundary, grc.notOnBoundary, nil } -func buildIsIndexedFunc(indexReader index.IndexReader, field string) (isIndexed filterFunc, closeF closeFunc, err error) { +func buildIsIndexedFunc(ctx context.Context, indexReader index.IndexReader, field string) (isIndexed filterFunc, closeF closeFunc, err error) { if irr, ok := indexReader.(index.IndexReaderContains); ok { fieldDict, err := irr.FieldDictContains(field) if err != nil { @@ -157,7 +181,7 @@ func buildIsIndexedFunc(indexReader index.IndexReader, field string) (isIndexed } } else if indexReader != nil { isIndexed = func(term []byte) bool { - reader, err := indexReader.TermFieldReader(term, field, false, false, false) + reader, err := indexReader.TermFieldReader(ctx, term, field, false, false, false) if err != nil || 
reader == nil { return false } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopointdistance.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopointdistance.go index c62a2a5..01ed209 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopointdistance.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopointdistance.go @@ -15,28 +15,48 @@ package searcher import ( + "context" + "github.com/blevesearch/bleve/v2/geo" "github.com/blevesearch/bleve/v2/numeric" "github.com/blevesearch/bleve/v2/search" index "github.com/blevesearch/bleve_index_api" ) -func NewGeoPointDistanceSearcher(indexReader index.IndexReader, centerLon, +func NewGeoPointDistanceSearcher(ctx context.Context, indexReader index.IndexReader, centerLon, centerLat, dist float64, field string, boost float64, options search.SearcherOptions) (search.Searcher, error) { - // compute bounding box containing the circle - topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, err := - geo.RectFromPointDistance(centerLon, centerLat, dist) - if err != nil { - return nil, err + var rectSearcher search.Searcher + if tp, ok := indexReader.(index.SpatialIndexPlugin); ok { + sp, err := tp.GetSpatialAnalyzerPlugin("s2") + if err == nil { + terms := sp.GetQueryTokens(geo.NewPointDistance(centerLat, + centerLon, dist)) + rectSearcher, err = NewMultiTermSearcher(ctx, indexReader, terms, + field, boost, options, false) + if err != nil { + return nil, err + } + } } - // build a searcher for the box - boxSearcher, err := boxSearcher(indexReader, - topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, - field, boost, options, false) - if err != nil { - return nil, err + // indexes without the spatial plugin override would get + // initialized here. 
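+	// That fallback computes the bounding rectangle that encloses the
+	// circle, searches the rectangle, and relies on the distance filter
+	// applied below to discard hits in the rectangle's corners.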
+ if rectSearcher == nil { + // compute bounding box containing the circle + topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, err := + geo.RectFromPointDistance(centerLon, centerLat, dist) + if err != nil { + return nil, err + } + + // build a searcher for the box + rectSearcher, err = boxSearcher(ctx, indexReader, + topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, + field, boost, options, false) + if err != nil { + return nil, err + } } dvReader, err := indexReader.DocValueReader([]string{field}) @@ -45,27 +65,27 @@ func NewGeoPointDistanceSearcher(indexReader index.IndexReader, centerLon, } // wrap it in a filtering searcher which checks the actual distance - return NewFilteringSearcher(boxSearcher, + return NewFilteringSearcher(ctx, rectSearcher, buildDistFilter(dvReader, field, centerLon, centerLat, dist)), nil } // boxSearcher builds a searcher for the described bounding box // if the desired box crosses the dateline, it is automatically split into // two boxes joined through a disjunction searcher -func boxSearcher(indexReader index.IndexReader, +func boxSearcher(ctx context.Context, indexReader index.IndexReader, topLeftLon, topLeftLat, bottomRightLon, bottomRightLat float64, field string, boost float64, options search.SearcherOptions, checkBoundaries bool) ( search.Searcher, error) { if bottomRightLon < topLeftLon { // cross date line, rewrite as two parts - leftSearcher, err := NewGeoBoundingBoxSearcher(indexReader, + leftSearcher, err := NewGeoBoundingBoxSearcher(ctx, indexReader, -180, bottomRightLat, bottomRightLon, topLeftLat, field, boost, options, checkBoundaries) if err != nil { return nil, err } - rightSearcher, err := NewGeoBoundingBoxSearcher(indexReader, + rightSearcher, err := NewGeoBoundingBoxSearcher(ctx, indexReader, topLeftLon, bottomRightLat, 180, topLeftLat, field, boost, options, checkBoundaries) if err != nil { @@ -73,7 +93,7 @@ func boxSearcher(indexReader index.IndexReader, return nil, err } - boxSearcher, err := NewDisjunctionSearcher(indexReader, + boxSearcher, err := NewDisjunctionSearcher(ctx, indexReader, []search.Searcher{leftSearcher, rightSearcher}, 0, options) if err != nil { _ = leftSearcher.Close() @@ -84,7 +104,7 @@ func boxSearcher(indexReader index.IndexReader, } // build geoboundingbox searcher for that bounding box - boxSearcher, err := NewGeoBoundingBoxSearcher(indexReader, + boxSearcher, err := NewGeoBoundingBoxSearcher(ctx, indexReader, topLeftLon, bottomRightLat, bottomRightLon, topLeftLat, field, boost, options, checkBoundaries) if err != nil { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopolygon.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopolygon.go index 9f7e61d..1d6538a 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopolygon.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geopolygon.go @@ -15,35 +15,53 @@ package searcher import ( + "context" "fmt" + "math" + "github.com/blevesearch/bleve/v2/geo" "github.com/blevesearch/bleve/v2/numeric" "github.com/blevesearch/bleve/v2/search" index "github.com/blevesearch/bleve_index_api" - "math" ) -func NewGeoBoundedPolygonSearcher(indexReader index.IndexReader, - polygon []geo.Point, field string, boost float64, +func NewGeoBoundedPolygonSearcher(ctx context.Context, indexReader index.IndexReader, + coordinates []geo.Point, field string, boost float64, options search.SearcherOptions) (search.Searcher, error) { - - if len(polygon) < 3 { + if len(coordinates) < 3 { return 
nil, fmt.Errorf("Too few points specified for the polygon boundary") } - // compute the bounding box enclosing the polygon - topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, err := - geo.BoundingRectangleForPolygon(polygon) - if err != nil { - return nil, err + var rectSearcher search.Searcher + if sr, ok := indexReader.(index.SpatialIndexPlugin); ok { + tp, err := sr.GetSpatialAnalyzerPlugin("s2") + if err == nil { + terms := tp.GetQueryTokens(geo.NewBoundedPolygon(coordinates)) + rectSearcher, err = NewMultiTermSearcher(ctx, indexReader, terms, + field, boost, options, false) + if err != nil { + return nil, err + } + } } - // build a searcher for the bounding box on the polygon - boxSearcher, err := boxSearcher(indexReader, - topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, - field, boost, options, true) - if err != nil { - return nil, err + // indexes without the spatial plugin override would get + // initialized here. + if rectSearcher == nil { + // compute the bounding box enclosing the polygon + topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, err := + geo.BoundingRectangleForPolygon(coordinates) + if err != nil { + return nil, err + } + + // build a searcher for the bounding box on the polygon + rectSearcher, err = boxSearcher(ctx, indexReader, + topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, + field, boost, options, true) + if err != nil { + return nil, err + } } dvReader, err := indexReader.DocValueReader([]string{field}) @@ -52,8 +70,8 @@ func NewGeoBoundedPolygonSearcher(indexReader index.IndexReader, } // wrap it in a filtering searcher that checks for the polygon inclusivity - return NewFilteringSearcher(boxSearcher, - buildPolygonFilter(dvReader, field, polygon)), nil + return NewFilteringSearcher(ctx, rectSearcher, + buildPolygonFilter(dvReader, field, coordinates)), nil } const float64EqualityThreshold = 1e-6 @@ -66,7 +84,7 @@ func almostEqual(a, b float64) bool { // polygon. It is based on the ray-casting technique as referred // here: https://wrf.ecse.rpi.edu/nikola/pubdetails/pnpoly.html func buildPolygonFilter(dvReader index.DocValueReader, field string, - polygon []geo.Point) FilterFunc { + coordinates []geo.Point) FilterFunc { return func(d *search.DocumentMatch) bool { // check geo matches against all numeric type terms indexed var lons, lats []float64 @@ -89,8 +107,8 @@ func buildPolygonFilter(dvReader index.DocValueReader, field string, // Note: this approach works for points which are strictly inside // the polygon. ie it might fail for certain points on the polygon boundaries. 
if err == nil && found { - nVertices := len(polygon) - if len(polygon) < 3 { + nVertices := len(coordinates) + if len(coordinates) < 3 { return false } rayIntersectsSegment := func(point, a, b geo.Point) bool { @@ -100,19 +118,19 @@ func buildPolygonFilter(dvReader index.DocValueReader, field string, for i := range lons { pt := geo.Point{Lon: lons[i], Lat: lats[i]} - inside := rayIntersectsSegment(pt, polygon[len(polygon)-1], polygon[0]) + inside := rayIntersectsSegment(pt, coordinates[len(coordinates)-1], coordinates[0]) // check for a direct vertex match - if almostEqual(polygon[0].Lat, lats[i]) && - almostEqual(polygon[0].Lon, lons[i]) { + if almostEqual(coordinates[0].Lat, lats[i]) && + almostEqual(coordinates[0].Lon, lons[i]) { return true } for j := 1; j < nVertices; j++ { - if almostEqual(polygon[j].Lat, lats[i]) && - almostEqual(polygon[j].Lon, lons[i]) { + if almostEqual(coordinates[j].Lat, lats[i]) && + almostEqual(coordinates[j].Lon, lons[i]) { return true } - if rayIntersectsSegment(pt, polygon[j-1], polygon[j]) { + if rayIntersectsSegment(pt, coordinates[j-1], coordinates[j]) { inside = !inside } } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoshape.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoshape.go new file mode 100644 index 0000000..d2c6b1c --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_geoshape.go @@ -0,0 +1,124 @@ +// Copyright (c) 2022 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package searcher + +import ( + "bytes" + "context" + + "github.com/blevesearch/bleve/v2/geo" + "github.com/blevesearch/bleve/v2/search" + index "github.com/blevesearch/bleve_index_api" + "github.com/blevesearch/geo/geojson" +) + +func NewGeoShapeSearcher(ctx context.Context, indexReader index.IndexReader, shape index.GeoJSON, + relation string, field string, boost float64, + options search.SearcherOptions) (search.Searcher, error) { + var err error + var spatialPlugin index.SpatialAnalyzerPlugin + + // check for the spatial plugin from the index. + if sr, ok := indexReader.(index.SpatialIndexPlugin); ok { + spatialPlugin, _ = sr.GetSpatialAnalyzerPlugin("s2") + } + + if spatialPlugin == nil { + // fallback to the default spatial plugin(s2). + spatialPlugin = geo.GetSpatialAnalyzerPlugin("s2") + } + + // obtain the query tokens. + terms := spatialPlugin.GetQueryTokens(shape) + mSearcher, err := NewMultiTermSearcher(ctx, indexReader, terms, + field, boost, options, false) + if err != nil { + return nil, err + } + + dvReader, err := indexReader.DocValueReader([]string{field}) + if err != nil { + return nil, err + } + + return NewFilteringSearcher(ctx, mSearcher, + buildRelationFilterOnShapes(dvReader, field, relation, shape)), nil + +} + +// Using the same term splitter slice used in the doc values in zap. +// TODO: This needs to be revisited whenever we change the zap +// implementation of doc values. 
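+//
+// The docvalue visitor in buildRelationFilterOnShapes (below) stitches a
+// shape's complete value back together from chunked callbacks: a GlueBytes
+// prefix marks the first chunk, a GlueBytes suffix marks the last, and
+// chunks after the first are re-joined with this separator before the
+// relation check runs.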
+var termSeparatorSplitSlice = []byte{0xff} + +func buildRelationFilterOnShapes(dvReader index.DocValueReader, field string, + relation string, shape index.GeoJSON) FilterFunc { + // this is for accumulating the shape's actual complete value + // spread across multiple docvalue visitor callbacks. + var dvShapeValue []byte + var startReading, finishReading bool + var reader *bytes.Reader + return func(d *search.DocumentMatch) bool { + var found bool + + err := dvReader.VisitDocValues(d.IndexInternalID, + func(field string, term []byte) { + + // only consider the values which are GlueBytes prefixed or + // if it had already started reading the shape bytes from previous callbacks. + if startReading || len(term) > geo.GlueBytesOffset { + + if !startReading && bytes.Equal(geo.GlueBytes, term[:geo.GlueBytesOffset]) { + startReading = true + + if bytes.Equal(geo.GlueBytes, term[len(term)-geo.GlueBytesOffset:]) { + term = term[:len(term)-geo.GlueBytesOffset] + finishReading = true + } + + dvShapeValue = append(dvShapeValue, term[geo.GlueBytesOffset:]...) + + } else if startReading && !finishReading { + if len(term) > geo.GlueBytesOffset && + bytes.Equal(geo.GlueBytes, term[len(term)-geo.GlueBytesOffset:]) { + term = term[:len(term)-geo.GlueBytesOffset] + finishReading = true + } + + term = append(termSeparatorSplitSlice, term...) + dvShapeValue = append(dvShapeValue, term...) + } + + // apply the filter once the entire docvalue is finished reading. + if finishReading { + v, err := geojson.FilterGeoShapesOnRelation(shape, + dvShapeValue, relation, &reader) + if err == nil && v { + found = true + } + dvShapeValue = dvShapeValue[:0] + startReading = false + finishReading = false + } + } + }) + + if err == nil && found { + return found + } + + return false + } +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_ip_range.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_ip_range.go new file mode 100644 index 0000000..3826620 --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_ip_range.go @@ -0,0 +1,68 @@ +// Copyright (c) 2014 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package searcher + +import ( + "context" + "net" + + "github.com/blevesearch/bleve/v2/search" + index "github.com/blevesearch/bleve_index_api" +) + +// netLimits returns the lo and hi bounds inside the network. 
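+// For example, 10.0.0.0/8 yields lo = 10.0.0.0 and hi = 10.255.255.255,
+// both widened to 16-byte form, so IPv4 and IPv6 networks are handled
+// uniformly by the field dictionary range scan in NewIPRangeSearcher.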
+func netLimits(n *net.IPNet) (lo net.IP, hi net.IP) { + ones, bits := n.Mask.Size() + netNum := n.IP + if bits == net.IPv4len*8 { + netNum = netNum.To16() + ones += 8 * (net.IPv6len - net.IPv4len) + } + mask := net.CIDRMask(ones, 8*net.IPv6len) + lo = make(net.IP, net.IPv6len) + hi = make(net.IP, net.IPv6len) + for i := 0; i < net.IPv6len; i++ { + lo[i] = netNum[i] & mask[i] + hi[i] = lo[i] | ^mask[i] + } + return lo, hi +} + +func NewIPRangeSearcher(ctx context.Context, indexReader index.IndexReader, ipNet *net.IPNet, + field string, boost float64, options search.SearcherOptions) ( + search.Searcher, error) { + + lo, hi := netLimits(ipNet) + fieldDict, err := indexReader.FieldDictRange(field, lo, hi) + if err != nil { + return nil, err + } + defer fieldDict.Close() + + var terms []string + tfd, err := fieldDict.Next() + for err == nil && tfd != nil { + terms = append(terms, tfd.Term) + if tooManyClauses(len(terms)) { + return nil, tooManyClausesErr(field, len(terms)) + } + tfd, err = fieldDict.Next() + } + if err != nil { + return nil, err + } + + return NewMultiTermSearcher(ctx, indexReader, terms, field, boost, options, true) +} diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_all.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_all.go index db8306e..57d8d07 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_all.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_match_all.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "reflect" "github.com/blevesearch/bleve/v2/search" @@ -37,7 +38,7 @@ type MatchAllSearcher struct { count uint64 } -func NewMatchAllSearcher(indexReader index.IndexReader, boost float64, options search.SearcherOptions) (*MatchAllSearcher, error) { +func NewMatchAllSearcher(ctx context.Context, indexReader index.IndexReader, boost float64, options search.SearcherOptions) (*MatchAllSearcher, error) { reader, err := indexReader.DocIDReaderAll() if err != nil { return nil, err @@ -48,6 +49,7 @@ func NewMatchAllSearcher(indexReader index.IndexReader, boost float64, options s return nil, err } scorer := scorer.NewConstantScorer(1.0, boost, options) + return &MatchAllSearcher{ indexReader: indexReader, reader: reader, diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_multi_term.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_multi_term.go index 523bf4b..913f99f 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_multi_term.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_multi_term.go @@ -15,41 +15,43 @@ package searcher import ( + "context" "fmt" + "github.com/blevesearch/bleve/v2/search" index "github.com/blevesearch/bleve_index_api" ) -func NewMultiTermSearcher(indexReader index.IndexReader, terms []string, +func NewMultiTermSearcher(ctx context.Context, indexReader index.IndexReader, terms []string, field string, boost float64, options search.SearcherOptions, limit bool) ( search.Searcher, error) { if tooManyClauses(len(terms)) { if optionsDisjunctionOptimizable(options) { - return optimizeMultiTermSearcher(indexReader, terms, field, boost, options) + return optimizeMultiTermSearcher(ctx, indexReader, terms, field, boost, options) } if limit { return nil, tooManyClausesErr(field, len(terms)) } } - qsearchers, err := makeBatchSearchers(indexReader, terms, field, boost, options) + qsearchers, err := makeBatchSearchers(ctx, indexReader, terms, field, boost, options) if err 
!= nil { return nil, err } // build disjunction searcher of these ranges - return newMultiTermSearcherInternal(indexReader, qsearchers, field, boost, + return newMultiTermSearcherInternal(ctx, indexReader, qsearchers, field, boost, options, limit) } -func NewMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byte, +func NewMultiTermSearcherBytes(ctx context.Context, indexReader index.IndexReader, terms [][]byte, field string, boost float64, options search.SearcherOptions, limit bool) ( search.Searcher, error) { if tooManyClauses(len(terms)) { if optionsDisjunctionOptimizable(options) { - return optimizeMultiTermSearcherBytes(indexReader, terms, field, boost, options) + return optimizeMultiTermSearcherBytes(ctx, indexReader, terms, field, boost, options) } if limit { @@ -57,23 +59,23 @@ func NewMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byte, } } - qsearchers, err := makeBatchSearchersBytes(indexReader, terms, field, boost, options) + qsearchers, err := makeBatchSearchersBytes(ctx, indexReader, terms, field, boost, options) if err != nil { return nil, err } // build disjunction searcher of these ranges - return newMultiTermSearcherInternal(indexReader, qsearchers, field, boost, + return newMultiTermSearcherInternal(ctx, indexReader, qsearchers, field, boost, options, limit) } -func newMultiTermSearcherInternal(indexReader index.IndexReader, +func newMultiTermSearcherInternal(ctx context.Context, indexReader index.IndexReader, searchers []search.Searcher, field string, boost float64, options search.SearcherOptions, limit bool) ( search.Searcher, error) { // build disjunction searcher of these ranges - searcher, err := newDisjunctionSearcher(indexReader, searchers, 0, options, + searcher, err := newDisjunctionSearcher(ctx, indexReader, searchers, 0, options, limit) if err != nil { for _, s := range searchers { @@ -85,7 +87,7 @@ func newMultiTermSearcherInternal(indexReader index.IndexReader, return searcher, nil } -func optimizeMultiTermSearcher(indexReader index.IndexReader, terms []string, +func optimizeMultiTermSearcher(ctx context.Context, indexReader index.IndexReader, terms []string, field string, boost float64, options search.SearcherOptions) ( search.Searcher, error) { var finalSearcher search.Searcher @@ -98,7 +100,7 @@ func optimizeMultiTermSearcher(indexReader index.IndexReader, terms []string, batchTerms = terms terms = nil } - batch, err := makeBatchSearchers(indexReader, batchTerms, field, boost, options) + batch, err := makeBatchSearchers(ctx, indexReader, batchTerms, field, boost, options) if err != nil { return nil, err } @@ -112,7 +114,7 @@ func optimizeMultiTermSearcher(indexReader index.IndexReader, terms []string, } } } - finalSearcher, err = optimizeCompositeSearcher("disjunction:unadorned", + finalSearcher, err = optimizeCompositeSearcher(ctx, "disjunction:unadorned", indexReader, batch, options) // all searchers in batch should be closed, regardless of error or optimization failure // either we're returning, or continuing and only finalSearcher is needed for next loop @@ -127,7 +129,7 @@ func optimizeMultiTermSearcher(indexReader index.IndexReader, terms []string, return finalSearcher, nil } -func makeBatchSearchers(indexReader index.IndexReader, terms []string, field string, +func makeBatchSearchers(ctx context.Context, indexReader index.IndexReader, terms []string, field string, boost float64, options search.SearcherOptions) ([]search.Searcher, error) { qsearchers := make([]search.Searcher, len(terms)) @@ -140,7 +142,7 @@ func 
makeBatchSearchers(indexReader index.IndexReader, terms []string, field str } for i, term := range terms { var err error - qsearchers[i], err = NewTermSearcher(indexReader, term, field, boost, options) + qsearchers[i], err = NewTermSearcher(ctx, indexReader, term, field, boost, options) if err != nil { qsearchersClose() return nil, err @@ -149,7 +151,7 @@ func makeBatchSearchers(indexReader index.IndexReader, terms []string, field str return qsearchers, nil } -func optimizeMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byte, +func optimizeMultiTermSearcherBytes(ctx context.Context, indexReader index.IndexReader, terms [][]byte, field string, boost float64, options search.SearcherOptions) ( search.Searcher, error) { @@ -163,7 +165,7 @@ func optimizeMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byt batchTerms = terms terms = nil } - batch, err := makeBatchSearchersBytes(indexReader, batchTerms, field, boost, options) + batch, err := makeBatchSearchersBytes(ctx, indexReader, batchTerms, field, boost, options) if err != nil { return nil, err } @@ -177,7 +179,7 @@ func optimizeMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byt } } } - finalSearcher, err = optimizeCompositeSearcher("disjunction:unadorned", + finalSearcher, err = optimizeCompositeSearcher(ctx, "disjunction:unadorned", indexReader, batch, options) // all searchers in batch should be closed, regardless of error or optimization failure // either we're returning, or continuing and only finalSearcher is needed for next loop @@ -192,7 +194,7 @@ func optimizeMultiTermSearcherBytes(indexReader index.IndexReader, terms [][]byt return finalSearcher, nil } -func makeBatchSearchersBytes(indexReader index.IndexReader, terms [][]byte, field string, +func makeBatchSearchersBytes(ctx context.Context, indexReader index.IndexReader, terms [][]byte, field string, boost float64, options search.SearcherOptions) ([]search.Searcher, error) { qsearchers := make([]search.Searcher, len(terms)) @@ -205,7 +207,7 @@ func makeBatchSearchersBytes(indexReader index.IndexReader, terms [][]byte, fiel } for i, term := range terms { var err error - qsearchers[i], err = NewTermSearcherBytes(indexReader, term, field, boost, options) + qsearchers[i], err = NewTermSearcherBytes(ctx, indexReader, term, field, boost, options) if err != nil { qsearchersClose() return nil, err diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_numeric_range.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_numeric_range.go index 6ab5147..68728c9 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_numeric_range.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_numeric_range.go @@ -16,6 +16,7 @@ package searcher import ( "bytes" + "context" "math" "sort" @@ -24,7 +25,7 @@ import ( index "github.com/blevesearch/bleve_index_api" ) -func NewNumericRangeSearcher(indexReader index.IndexReader, +func NewNumericRangeSearcher(ctx context.Context, indexReader index.IndexReader, min *float64, max *float64, inclusiveMin, inclusiveMax *bool, field string, boost float64, options search.SearcherOptions) (search.Searcher, error) { // account for unbounded edges @@ -55,6 +56,7 @@ func NewNumericRangeSearcher(indexReader index.IndexReader, } var fieldDict index.FieldDictContains + var dictBytesRead uint64 var isIndexed filterFunc var err error if irr, ok := indexReader.(index.IndexReaderContains); ok { @@ -67,6 +69,8 @@ func NewNumericRangeSearcher(indexReader 
index.IndexReader, found, err := fieldDict.Contains(term) return err == nil && found } + + dictBytesRead = fieldDict.BytesRead() } // FIXME hard-coded precision, should match field declaration @@ -81,10 +85,16 @@ func NewNumericRangeSearcher(indexReader index.IndexReader, } if len(terms) < 1 { + // reporting back the IO stats with respect to the dictionary + // loaded, using the context + if ctx != nil { + reportIOStats(dictBytesRead, ctx) + } + // cannot return MatchNoneSearcher because of interaction with // commit f391b991c20f02681bacd197afc6d8aed444e132 - return NewMultiTermSearcherBytes(indexReader, terms, field, boost, options, - true) + return NewMultiTermSearcherBytes(ctx, indexReader, terms, field, + boost, options, true) } // for upside_down @@ -99,8 +109,12 @@ func NewNumericRangeSearcher(indexReader index.IndexReader, return nil, tooManyClausesErr(field, len(terms)) } - return NewMultiTermSearcherBytes(indexReader, terms, field, boost, options, - true) + if ctx != nil { + reportIOStats(dictBytesRead, ctx) + } + + return NewMultiTermSearcherBytes(ctx, indexReader, terms, field, + boost, options, true) } func filterCandidateTerms(indexReader index.IndexReader, diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_phrase.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_phrase.go index c262fd9..087ad76 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_phrase.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_phrase.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "fmt" "math" "reflect" @@ -63,22 +64,22 @@ func (s *PhraseSearcher) Size() int { return sizeInBytes } -func NewPhraseSearcher(indexReader index.IndexReader, terms []string, field string, options search.SearcherOptions) (*PhraseSearcher, error) { +func NewPhraseSearcher(ctx context.Context, indexReader index.IndexReader, terms []string, field string, options search.SearcherOptions) (*PhraseSearcher, error) { // turn flat terms []string into [][]string mterms := make([][]string, len(terms)) for i, term := range terms { mterms[i] = []string{term} } - return NewMultiPhraseSearcher(indexReader, mterms, field, options) + return NewMultiPhraseSearcher(ctx, indexReader, mterms, field, options) } -func NewMultiPhraseSearcher(indexReader index.IndexReader, terms [][]string, field string, options search.SearcherOptions) (*PhraseSearcher, error) { +func NewMultiPhraseSearcher(ctx context.Context, indexReader index.IndexReader, terms [][]string, field string, options search.SearcherOptions) (*PhraseSearcher, error) { options.IncludeTermVectors = true var termPositionSearchers []search.Searcher for _, termPos := range terms { if len(termPos) == 1 && termPos[0] != "" { // single term - ts, err := NewTermSearcher(indexReader, termPos[0], field, 1.0, options) + ts, err := NewTermSearcher(ctx, indexReader, termPos[0], field, 1.0, options) if err != nil { // close any searchers already opened for _, ts := range termPositionSearchers { @@ -94,7 +95,7 @@ func NewMultiPhraseSearcher(indexReader index.IndexReader, terms [][]string, fie if term == "" { continue } - ts, err := NewTermSearcher(indexReader, term, field, 1.0, options) + ts, err := NewTermSearcher(ctx, indexReader, term, field, 1.0, options) if err != nil { // close any searchers already opened for _, ts := range termPositionSearchers { @@ -104,7 +105,7 @@ func NewMultiPhraseSearcher(indexReader index.IndexReader, terms [][]string, fie } termSearchers = append(termSearchers, ts) } - 
disjunction, err := NewDisjunctionSearcher(indexReader, termSearchers, 1, options) + disjunction, err := NewDisjunctionSearcher(ctx, indexReader, termSearchers, 1, options) if err != nil { // close any searchers already opened for _, ts := range termPositionSearchers { @@ -116,7 +117,7 @@ func NewMultiPhraseSearcher(indexReader index.IndexReader, terms [][]string, fie } } - mustSearcher, err := NewConjunctionSearcher(indexReader, termPositionSearchers, options) + mustSearcher, err := NewConjunctionSearcher(ctx, indexReader, termPositionSearchers, options) if err != nil { // close any searchers already opened for _, ts := range termPositionSearchers { diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_regexp.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_regexp.go index 81b1cf7..b419d54 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_regexp.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_regexp.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "regexp" "github.com/blevesearch/bleve/v2/search" @@ -34,7 +35,7 @@ type Regexp interface { // NewRegexpStringSearcher is similar to NewRegexpSearcher, but // additionally optimizes for index readers that handle regexp's. -func NewRegexpStringSearcher(indexReader index.IndexReader, pattern string, +func NewRegexpStringSearcher(ctx context.Context, indexReader index.IndexReader, pattern string, field string, boost float64, options search.SearcherOptions) ( search.Searcher, error) { ir, ok := indexReader.(index.IndexReaderRegexp) @@ -44,7 +45,7 @@ func NewRegexpStringSearcher(indexReader index.IndexReader, pattern string, return nil, err } - return NewRegexpSearcher(indexReader, r, field, boost, options) + return NewRegexpSearcher(ctx, indexReader, r, field, boost, options) } fieldDict, err := ir.FieldDictRegexp(field, pattern) @@ -68,7 +69,7 @@ func NewRegexpStringSearcher(indexReader index.IndexReader, pattern string, return nil, err } - return NewMultiTermSearcher(indexReader, candidateTerms, field, boost, + return NewMultiTermSearcher(ctx, indexReader, candidateTerms, field, boost, options, true) } @@ -77,31 +78,47 @@ func NewRegexpStringSearcher(indexReader index.IndexReader, pattern string, // matching the entire term. The provided regexp SHOULD NOT start with ^ // or end with $ as this can intefere with the implementation. Separately, // matches will be checked to ensure they match the entire term. 
-func NewRegexpSearcher(indexReader index.IndexReader, pattern Regexp, +func NewRegexpSearcher(ctx context.Context, indexReader index.IndexReader, pattern Regexp, field string, boost float64, options search.SearcherOptions) ( search.Searcher, error) { var candidateTerms []string - + var regexpCandidates *regexpCandidates prefixTerm, complete := pattern.LiteralPrefix() if complete { // there is no pattern candidateTerms = []string{prefixTerm} } else { var err error - candidateTerms, err = findRegexpCandidateTerms(indexReader, pattern, field, + regexpCandidates, err = findRegexpCandidateTerms(indexReader, pattern, field, prefixTerm) if err != nil { return nil, err } } + var dictBytesRead uint64 + if regexpCandidates != nil { + candidateTerms = regexpCandidates.candidates + dictBytesRead = regexpCandidates.bytesRead + } + + if ctx != nil { + reportIOStats(dictBytesRead, ctx) + } - return NewMultiTermSearcher(indexReader, candidateTerms, field, boost, + return NewMultiTermSearcher(ctx, indexReader, candidateTerms, field, boost, options, true) } +type regexpCandidates struct { + candidates []string + bytesRead uint64 +} + func findRegexpCandidateTerms(indexReader index.IndexReader, - pattern Regexp, field, prefixTerm string) (rv []string, err error) { - rv = make([]string, 0) + pattern Regexp, field, prefixTerm string) (rv *regexpCandidates, err error) { + rv = ®expCandidates{ + candidates: make([]string, 0), + } var fieldDict index.FieldDict if len(prefixTerm) > 0 { fieldDict, err = indexReader.FieldDictPrefix(field, []byte(prefixTerm)) @@ -119,13 +136,13 @@ func findRegexpCandidateTerms(indexReader index.IndexReader, for err == nil && tfd != nil { matchPos := pattern.FindStringIndex(tfd.Term) if matchPos != nil && matchPos[0] == 0 && matchPos[1] == len(tfd.Term) { - rv = append(rv, tfd.Term) - if tooManyClauses(len(rv)) { - return rv, tooManyClausesErr(field, len(rv)) + rv.candidates = append(rv.candidates, tfd.Term) + if tooManyClauses(len(rv.candidates)) { + return rv, tooManyClausesErr(field, len(rv.candidates)) } } tfd, err = fieldDict.Next() } - + rv.bytesRead = fieldDict.BytesRead() return rv, err } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term.go index 55c18d1..db18e53 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term.go @@ -15,6 +15,7 @@ package searcher import ( + "context" "reflect" "github.com/blevesearch/bleve/v2/search" @@ -37,13 +38,13 @@ type TermSearcher struct { tfd index.TermFieldDoc } -func NewTermSearcher(indexReader index.IndexReader, term string, field string, boost float64, options search.SearcherOptions) (*TermSearcher, error) { - return NewTermSearcherBytes(indexReader, []byte(term), field, boost, options) +func NewTermSearcher(ctx context.Context, indexReader index.IndexReader, term string, field string, boost float64, options search.SearcherOptions) (*TermSearcher, error) { + return NewTermSearcherBytes(ctx, indexReader, []byte(term), field, boost, options) } -func NewTermSearcherBytes(indexReader index.IndexReader, term []byte, field string, boost float64, options search.SearcherOptions) (*TermSearcher, error) { +func NewTermSearcherBytes(ctx context.Context, indexReader index.IndexReader, term []byte, field string, boost float64, options search.SearcherOptions) (*TermSearcher, error) { needFreqNorm := options.Score != "none" - reader, err := 
indexReader.TermFieldReader(term, field, needFreqNorm, needFreqNorm, options.IncludeTermVectors) + reader, err := indexReader.TermFieldReader(ctx, term, field, needFreqNorm, needFreqNorm, options.IncludeTermVectors) if err != nil { return nil, err } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_prefix.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_prefix.go index a01b186..89f836a 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_prefix.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_prefix.go @@ -15,11 +15,13 @@ package searcher import ( + "context" + "github.com/blevesearch/bleve/v2/search" index "github.com/blevesearch/bleve_index_api" ) -func NewTermPrefixSearcher(indexReader index.IndexReader, prefix string, +func NewTermPrefixSearcher(ctx context.Context, indexReader index.IndexReader, prefix string, field string, boost float64, options search.SearcherOptions) ( search.Searcher, error) { // find the terms with this prefix @@ -46,5 +48,9 @@ func NewTermPrefixSearcher(indexReader index.IndexReader, prefix string, return nil, err } - return NewMultiTermSearcher(indexReader, terms, field, boost, options, true) + if ctx != nil { + reportIOStats(fieldDict.BytesRead(), ctx) + } + + return NewMultiTermSearcher(ctx, indexReader, terms, field, boost, options, true) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_range.go b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_range.go index 5ef58f7..a2fb4e9 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_range.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/searcher/search_term_range.go @@ -15,11 +15,13 @@ package searcher import ( + "context" + "github.com/blevesearch/bleve/v2/search" index "github.com/blevesearch/bleve_index_api" ) -func NewTermRangeSearcher(indexReader index.IndexReader, +func NewTermRangeSearcher(ctx context.Context, indexReader index.IndexReader, min, max []byte, inclusiveMin, inclusiveMax *bool, field string, boost float64, options search.SearcherOptions) (search.Searcher, error) { @@ -81,5 +83,9 @@ func NewTermRangeSearcher(indexReader index.IndexReader, terms = terms[:len(terms)-1] } - return NewMultiTermSearcher(indexReader, terms, field, boost, options, true) + if ctx != nil { + reportIOStats(fieldDict.BytesRead(), ctx) + } + + return NewMultiTermSearcher(ctx, indexReader, terms, field, boost, options, true) } diff --git a/vendor/github.com/blevesearch/bleve/v2/search/sort.go b/vendor/github.com/blevesearch/bleve/v2/search/sort.go index 496db47..9ed9a78 100644 --- a/vendor/github.com/blevesearch/bleve/v2/search/sort.go +++ b/vendor/github.com/blevesearch/bleve/v2/search/sort.go @@ -21,12 +21,13 @@ import ( "math" "sort" "strings" + "unicode/utf8" "github.com/blevesearch/bleve/v2/geo" "github.com/blevesearch/bleve/v2/numeric" ) -var HighTerm = strings.Repeat(string([]byte{0xff}), 10) +var HighTerm = strings.Repeat(string(utf8.MaxRune), 3) var LowTerm = string([]byte{0x00}) type SearchSort interface { diff --git a/vendor/github.com/blevesearch/bleve_index_api/directory.go b/vendor/github.com/blevesearch/bleve_index_api/directory.go new file mode 100644 index 0000000..709a384 --- /dev/null +++ b/vendor/github.com/blevesearch/bleve_index_api/directory.go @@ -0,0 +1,23 @@ +// Copyright (c) 2021 Couchbase, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//		http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+	"io"
+)
+
+type Directory interface {
+	GetWriter(filePath string) (io.WriteCloser, error)
+}
diff --git a/vendor/github.com/blevesearch/bleve_index_api/document.go b/vendor/github.com/blevesearch/bleve_index_api/document.go
index a6d0527..416d701 100644
--- a/vendor/github.com/blevesearch/bleve_index_api/document.go
+++ b/vendor/github.com/blevesearch/bleve_index_api/document.go
@@ -27,6 +27,8 @@ type Document interface {
 	NumPlainTextBytes() uint64
 
 	AddIDField()
+
+	StoredFieldsBytes() uint64
 }
 
 type FieldVisitor func(Field)
@@ -76,3 +78,16 @@ type GeoPointField interface {
 	Lon() (float64, error)
 	Lat() (float64, error)
 }
+
+type GeoShapeField interface {
+	GeoShape() (GeoJSON, error)
+}
+
+// TokenizableSpatialField is an optional interface for fields that
+// support pluggable custom hierarchical spatial token generation.
+type TokenizableSpatialField interface {
+	// SetSpatialAnalyzerPlugin lets index implementations
+	// initialize the relevant spatial analyzer plugins for the field,
+	// overriding spatial token generation during the analysis phase.
+	SetSpatialAnalyzerPlugin(SpatialAnalyzerPlugin)
+}
diff --git a/vendor/github.com/blevesearch/bleve_index_api/go.mod b/vendor/github.com/blevesearch/bleve_index_api/go.mod
index f129fb5..47c10b2 100644
--- a/vendor/github.com/blevesearch/bleve_index_api/go.mod
+++ b/vendor/github.com/blevesearch/bleve_index_api/go.mod
@@ -1,3 +1,3 @@
 module github.com/blevesearch/bleve_index_api
 
-go 1.13
+go 1.18
diff --git a/vendor/github.com/blevesearch/bleve_index_api/index.go b/vendor/github.com/blevesearch/bleve_index_api/index.go
index 1497919..4c916d5 100644
--- a/vendor/github.com/blevesearch/bleve_index_api/index.go
+++ b/vendor/github.com/blevesearch/bleve_index_api/index.go
@@ -16,6 +16,7 @@ package index
 
 import (
 	"bytes"
+	"context"
 	"reflect"
 )
 
@@ -48,7 +49,7 @@ type Index interface {
 }
 
 type IndexReader interface {
-	TermFieldReader(term []byte, field string, includeFreq, includeNorm, includeTermVectors bool) (TermFieldReader, error)
+	TermFieldReader(ctx context.Context, term []byte, field string, includeFreq, includeNorm, includeTermVectors bool) (TermFieldReader, error)
 
 	// DocIDReader returns an iterator over all doc ids
 	// The caller must close returned instance to release associated resources.
@@ -90,6 +91,14 @@ type IndexReaderContains interface {
 	FieldDictContains(field string) (FieldDictContains, error)
 }
 
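To make the new plugin seam concrete, here is a minimal sketch of a custom analyzer satisfying the SpatialAnalyzerPlugin interface this patch introduces (declared in spatial_plugin.go just below). The flatGridPlugin name and its token scheme are invented for illustration; only the two interfaces come from the patch.

```go
package spatialdemo

import (
	index "github.com/blevesearch/bleve_index_api"
)

// flatGridPlugin is a hypothetical SpatialAnalyzerPlugin that buckets
// shapes by their geoJSON type; a real plugin would emit hierarchical
// cell tokens (the default "s2" plugin emits s2 cell IDs).
type flatGridPlugin struct{}

func (p *flatGridPlugin) Type() string { return "flatgrid" }

// GetIndexTokens returns the tokens stored for an indexed shape.
func (p *flatGridPlugin) GetIndexTokens(s index.GeoJSON) []string {
	return []string{"grid:" + s.Type()}
}

// GetQueryTokens returns the tokens a query shape is matched against.
func (p *flatGridPlugin) GetQueryTokens(s index.GeoJSON) []string {
	return []string{"grid:" + s.Type()}
}

// Compile-time check that the sketch satisfies the new interface.
var _ index.SpatialAnalyzerPlugin = (*flatGridPlugin)(nil)
```

A reader that implements SpatialIndexPlugin could hand such a plugin back from GetSpatialAnalyzerPlugin; as seen in NewGeoShapeSearcher earlier in this patch, searchers that find no plugin on the reader fall back to the default "s2" implementation.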
+// SpatialIndexPlugin is an optional interface for exposing
+// support for custom analyzer plugins that are capable of
+// generating hierarchical spatial tokens for both indexing and
+// query purposes from the geo location data.
+type SpatialIndexPlugin interface {
+	GetSpatialAnalyzerPlugin(typ string) (SpatialAnalyzerPlugin, error)
+}
+
 type TermFieldVector struct {
 	Field          string
 	ArrayPositions []uint64
@@ -174,10 +183,14 @@ type DictEntry struct {
 type FieldDict interface {
 	Next() (*DictEntry, error)
 	Close() error
+
+	BytesRead() uint64
 }
 
 type FieldDictContains interface {
 	Contains(key []byte) (bool, error)
+
+	BytesRead() uint64
 }
 
 // DocIDReader is the interface exposing enumeration of documents identifiers.
@@ -202,6 +215,8 @@ type DocValueVisitor func(field string, term []byte)
 
 type DocValueReader interface {
 	VisitDocValues(id IndexInternalID, visitor DocValueVisitor) error
+
+	BytesRead() uint64
 }
 
 // IndexBuilder is an interface supported by some index schemes
diff --git a/vendor/github.com/blevesearch/bleve_index_api/spatial_plugin.go b/vendor/github.com/blevesearch/bleve_index_api/spatial_plugin.go
new file mode 100644
index 0000000..bee0476
--- /dev/null
+++ b/vendor/github.com/blevesearch/bleve_index_api/spatial_plugin.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2022 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//		http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+// SpatialAnalyzerPlugin is an interface for custom spatial
+// tokenizer implementations that support the generation of spatial
+// hierarchical tokens for both indexing and querying of geoJSON data.
+type SpatialAnalyzerPlugin interface {
+	// Type returns the plugin type. eg: "s2".
+	Type() string
+
+	// GetIndexTokens returns the tokens to be indexed for the
+	// given GeoJSON type data in the document.
+	GetIndexTokens(GeoJSON) []string
+
+	// GetQueryTokens returns the tokens to be queried for the
+	// given GeoJSON type data in the document.
+	GetQueryTokens(GeoJSON) []string
+}
+
+// GeoJSON is a generic interface for geoJSON shapes like
+// points, polygons, etc.
+type GeoJSON interface {
+	// Returns the type of geoJSON shape.
+	Type() string
+
+	// Checks whether the given shape intersects with the current shape.
+	Intersects(GeoJSON) (bool, error)
+
+	// Checks whether the given shape resides within the current shape.
+	Contains(GeoJSON) (bool, error)
+
+	// Value returns the byte value for the shape.
+	Value() ([]byte, error)
+}
diff --git a/vendor/github.com/blevesearch/geo/LICENSE b/vendor/github.com/blevesearch/geo/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
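The geojson files that follow supply the s2-backed utilities and concrete shape implementations this release indexes and queries against. As a quick orientation, here is a hedged usage sketch driving only constructors and helpers added in this patch (NewGeoJsonPolygon, NewGeoJsonPoint, ParseDistance); the containment result assumes the s2-backed check behaves as expected for a point interior to the ring:

```go
package main

import (
	"fmt"

	"github.com/blevesearch/geo/geojson"
)

func main() {
	// A unit square about the origin; geoJSON ordering is [lon, lat]
	// and the outer ring closes back on its first vertex.
	poly := geojson.NewGeoJsonPolygon([][][]float64{{
		{-1, -1}, {1, -1}, {1, 1}, {-1, 1}, {-1, -1},
	}})
	pt := geojson.NewGeoJsonPoint([]float64{0, 0})

	inside, err := poly.Contains(pt)
	fmt.Println(inside, err) // expected: true <nil>

	// ParseDistance (defined below) normalizes distance strings to meters.
	m, err := geojson.ParseDistance("3nm")
	fmt.Println(m, err) // 5556 <nil> (3 nautical miles)
}
```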
diff --git a/vendor/github.com/blevesearch/geo/geojson/geojson_s2_util.go b/vendor/github.com/blevesearch/geo/geojson/geojson_s2_util.go
new file mode 100644
index 0000000..60d9aa8
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/geojson/geojson_s2_util.go
@@ -0,0 +1,319 @@
+// Copyright (c) 2022 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//		http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package geojson
+
+import (
+	"strconv"
+	"strings"
+
+	index "github.com/blevesearch/bleve_index_api"
+	"github.com/blevesearch/geo/s2"
+	"github.com/golang/geo/s1"
+)
+
+// ------------------------------------------------------------------------
+
+func polylineIntersectsPoint(pls []*s2.Polyline,
+	point *s2.Point) bool {
+	s2cell := s2.CellFromPoint(*point)
+
+	for _, pl := range pls {
+		if pl.IntersectsCell(s2cell) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func polylineIntersectsPolygons(pls []*s2.Polyline,
+	s2pgns []*s2.Polygon) bool {
+	// Early exit if a polygon intersects any of the line's vertices.
+	for _, pl := range pls {
+		for i := 0; i < pl.NumEdges(); i++ {
+			edge := pl.Edge(i)
+			for _, s2pgn := range s2pgns {
+				if s2pgn.IntersectsCell(s2.CellFromPoint(edge.V0)) ||
+					s2pgn.IntersectsCell(s2.CellFromPoint(edge.V1)) {
+					return true
+				}
+			}
+		}
+	}
+
+	for _, pl := range pls {
+		for _, s2pgn := range s2pgns {
+			for i := 0; i < pl.NumEdges(); i++ {
+				for j := 0; j < s2pgn.NumEdges(); j++ {
+					edgeB := s2pgn.Edge(j)
+					latLng1 := s2.LatLngFromPoint(edgeB.V0)
+					latLng2 := s2.LatLngFromPoint(edgeB.V1)
+					pl2 := s2.PolylineFromLatLngs([]s2.LatLng{latLng1, latLng2})
+
+					if pl.Intersects(pl2) {
+						return true
+					}
+				}
+			}
+		}
+	}
+
+	return false
+}
+
+func geometryCollectionIntersectsShape(gc *GeometryCollection,
+	shapeIn index.GeoJSON) bool {
+	for _, shape := range gc.Members() {
+		intersects, err := shapeIn.Intersects(shape)
+		if err == nil && intersects {
+			return true
+		}
+	}
+	return false
+}
+
+func polygonsContainsLineStrings(s2pgns []*s2.Polygon,
+	pls []*s2.Polyline) bool {
+	linesWithIn := make(map[int]struct{})
+	checker := s2.NewCrossingEdgeQuery(s2.NewShapeIndex())
+nextLine:
+	for lineIndex, pl := range pls {
+		for i := 0; i < len(*pl)-1; i++ {
+			start := (*pl)[i]
+			end := (*pl)[i+1]
+
+			for _, s2pgn := range s2pgns {
+				containsStart := s2pgn.ContainsPoint(start)
+				containsEnd := s2pgn.ContainsPoint(end)
+				if containsStart && containsEnd {
+					crossings := checker.Crossings(start, end, s2pgn, s2.CrossingTypeInterior)
+					if len(crossings) > 0 {
+						continue nextLine
+					}
+					linesWithIn[lineIndex] = struct{}{}
+					continue nextLine
+				} else {
+					for _, loop := range s2pgn.Loops() {
+						for j := 0; j < loop.NumVertices(); j++ {
+							if !containsStart && start.ApproxEqual(loop.Vertex(j)) {
+								containsStart = true
+							} else if !containsEnd && end.ApproxEqual(loop.Vertex(j)) {
+								containsEnd = true
+							}
+							if containsStart && containsEnd {
+								linesWithIn[lineIndex] = struct{}{}
+								continue nextLine
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return len(pls) == len(linesWithIn)
+}
+
+func rectangleIntersectsWithPolygons(s2rect *s2.Rect,
+ s2pgns []*s2.Polygon) bool { + s2pgnFromRect := s2PolygonFromS2Rectangle(s2rect) + for _, s2pgn := range s2pgns { + if s2pgn.Intersects(s2pgnFromRect) { + return true + } + } + + return false +} + +func rectangleIntersectsWithLineStrings(s2rect *s2.Rect, + polylines []*s2.Polyline) bool { + // Early exit path if the envelope contains any of the linestring's vertices. + for _, pl := range polylines { + for i := 0; i < pl.NumEdges(); i++ { + edge := pl.Edge(i) + if s2rect.IntersectsCell(s2.CellFromPoint(edge.V0)) || + s2rect.IntersectsCell(s2.CellFromPoint(edge.V1)) { + return true + } + } + } + + for _, pl := range polylines { + for i := 0; i < pl.NumEdges(); i++ { + for j := 0; j < 4; j++ { + pl2 := s2.PolylineFromLatLngs([]s2.LatLng{s2rect.Vertex(j), + s2rect.Vertex((j + 1) % 4)}) + + if pl.Intersects(pl2) { + return true + } + } + } + } + + return false +} + +func s2PolygonFromCoordinates(coordinates [][][]float64) *s2.Polygon { + loops := make([]*s2.Loop, 0, len(coordinates)) + for _, loop := range coordinates { + var points []s2.Point + if loop[0][0] == loop[len(loop)-1][0] && loop[0][1] == loop[len(loop)-1][1] { + loop = loop[:len(loop)-1] + } + for _, point := range loop { + p := s2.PointFromLatLng(s2.LatLngFromDegrees(point[1], point[0])) + points = append(points, p) + } + s2loop := s2.LoopFromPoints(points) + loops = append(loops, s2loop) + } + + rv := s2.PolygonFromOrientedLoops(loops) + return rv +} + +func s2PolygonFromS2Rectangle(s2rect *s2.Rect) *s2.Polygon { + loops := make([]*s2.Loop, 0, 1) + var points []s2.Point + for j := 0; j < 4; j++ { + points = append(points, s2.PointFromLatLng(s2rect.Vertex(j%4))) + } + + loops = append(loops, s2.LoopFromPoints(points)) + return s2.PolygonFromLoops(loops) +} + +func DeduplicateTerms(terms []string) []string { + var rv []string + hash := make(map[string]struct{}, len(terms)) + for _, term := range terms { + if _, exists := hash[term]; !exists { + rv = append(rv, term) + hash[term] = struct{}{} + } + } + + return rv +} + +//---------------------------------------------------------------------- + +var earthRadiusInMeter = 6378137.0 + +func radiusInMetersToS1Angle(radius float64) s1.Angle { + return s1.Angle(radius / earthRadiusInMeter) +} + +func s2PolylinesFromCoordinates(coordinates [][][]float64) []*s2.Polyline { + var polylines []*s2.Polyline + for _, lines := range coordinates { + var latlngs []s2.LatLng + for _, line := range lines { + v := s2.LatLngFromDegrees(line[1], line[0]) + latlngs = append(latlngs, v) + } + polylines = append(polylines, s2.PolylineFromLatLngs(latlngs)) + } + return polylines +} + +func s2RectFromBounds(topLeft, bottomRight []float64) *s2.Rect { + rect := s2.EmptyRect() + rect = rect.AddPoint(s2.LatLngFromDegrees(topLeft[1], topLeft[0])) + rect = rect.AddPoint(s2.LatLngFromDegrees(bottomRight[1], bottomRight[0])) + return &rect +} + +func s2Cap(vertices []float64, radiusInMeter float64) *s2.Cap { + cp := s2.PointFromLatLng(s2.LatLngFromDegrees(vertices[1], vertices[0])) + angle := radiusInMetersToS1Angle(float64(radiusInMeter)) + cap := s2.CapFromCenterAngle(cp, angle) + return &cap +} + +func max(a, b float64) float64 { + if a >= b { + return a + } + return b +} + +func min(a, b float64) float64 { + if a >= b { + return b + } + return a +} + +func StripCoveringTerms(terms []string) []string { + rv := make([]string, 0, len(terms)) + for _, term := range terms { + if strings.HasPrefix(term, "$") { + rv = append(rv, term[1:]) + continue + } + rv = append(rv, term) + } + return DeduplicateTerms(rv) +} + 
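DeduplicateTerms and StripCoveringTerms above are simplest to see on data: covering tokens are written with a "$" prefix, and stripping that prefix can collide with an uncovered copy of the same cell token, hence the dedup pass. A small sketch (the token values are invented):

```go
package main

import (
	"fmt"

	"github.com/blevesearch/geo/geojson"
)

func main() {
	// "$1023fd" is a covering token; once its prefix is stripped it
	// duplicates the plain "1023fd" token, so only one copy survives.
	terms := []string{"$1023fd", "1023fd", "$1023fc", "1023ff"}
	fmt.Println(geojson.StripCoveringTerms(terms))
	// Output: [1023fd 1023fc 1023ff]
}
```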
+type distanceUnit struct {
+	conv     float64
+	suffixes []string
+}
+
+var inch = distanceUnit{0.0254, []string{"in", "inch"}}
+var yard = distanceUnit{0.9144, []string{"yd", "yards"}}
+var feet = distanceUnit{0.3048, []string{"ft", "feet"}}
+var kilom = distanceUnit{1000, []string{"km", "kilometers"}}
+var nauticalm = distanceUnit{1852.0, []string{"nm", "nauticalmiles"}}
+var millim = distanceUnit{0.001, []string{"mm", "millimeters"}}
+var centim = distanceUnit{0.01, []string{"cm", "centimeters"}}
+var miles = distanceUnit{1609.344, []string{"mi", "miles"}}
+var meters = distanceUnit{1, []string{"m", "meters"}}
+
+var distanceUnits = []*distanceUnit{
+	&inch, &yard, &feet, &kilom, &nauticalm, &millim, &centim, &miles, &meters,
+}
+
+// ParseDistance attempts to parse a distance string and return distance in
+// meters. Example formats supported:
+// "5in" "5inch" "7yd" "7yards" "9ft" "9feet" "11km" "11kilometers"
+// "3nm" "3nauticalmiles" "13mm" "13millimeters" "15cm" "15centimeters"
+// "17mi" "17miles" "19m" "19meters"
+// If the unit cannot be determined, the entire string is parsed and the
+// unit of meters is assumed.
+// If the number portion cannot be parsed, 0 and the parse error are returned.
+func ParseDistance(d string) (float64, error) {
+	for _, unit := range distanceUnits {
+		for _, unitSuffix := range unit.suffixes {
+			if strings.HasSuffix(d, unitSuffix) {
+				parsedNum, err := strconv.ParseFloat(d[0:len(d)-len(unitSuffix)], 64)
+				if err != nil {
+					return 0, err
+				}
+				return parsedNum * unit.conv, nil
+			}
+		}
+	}
+	// no unit matched, assume meters
+	parsedNum, err := strconv.ParseFloat(d, 64)
+	if err != nil {
+		return 0, err
+	}
+	return parsedNum, nil
+}
diff --git a/vendor/github.com/blevesearch/geo/geojson/geojson_shapes_impl.go b/vendor/github.com/blevesearch/geo/geojson/geojson_shapes_impl.go
new file mode 100644
index 0000000..3973458
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/geojson/geojson_shapes_impl.go
@@ -0,0 +1,1861 @@
+// Copyright (c) 2022 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//		http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package geojson
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	index "github.com/blevesearch/bleve_index_api"
+
+	"github.com/blevesearch/geo/s2"
+	jsoniter "github.com/json-iterator/go"
+)
+
+// s2Serializable is an optional interface for implementations
+// supporting custom serialization of data based on s2's
+// encode methods.
+type s2Serializable interface {
+	// Marshal implementation should encode the shape using
+	// s2's encode methods with appropriate prefix bytes to
+	// identify the type of the contents.
+ Marshal() ([]byte, error) +} + +const ( + PointType = "point" + MultiPointType = "multipoint" + LineStringType = "linestring" + MultiLineStringType = "multilinestring" + PolygonType = "polygon" + MultiPolygonType = "multipolygon" + GeometryCollectionType = "geometrycollection" + CircleType = "circle" + EnvelopeType = "envelope" +) + +// These are the byte prefixes for identifying the +// shape contained within the doc values byte slice +// while decoding the contents during the query +// filtering phase. +const ( + PointTypePrefix = byte(1) + MultiPointTypePrefix = byte(2) + LineStringTypePrefix = byte(3) + MultiLineStringTypePrefix = byte(4) + PolygonTypePrefix = byte(5) + MultiPolygonTypePrefix = byte(6) + GeometryCollectionTypePrefix = byte(7) + CircleTypePrefix = byte(8) + EnvelopeTypePrefix = byte(9) +) + +// compositeShape is an optional interface for the +// composite geoJSON shapes which is composed of +// multiple spatial shapes within it. Composite shapes +// like multipoint, multilinestring, multipolygon and +// geometrycollection shapes are supposed to implement +// this interface. +type compositeShape interface { + // Members implementation returns the + // geoJSON shapes composed within the shape. + Members() []index.GeoJSON +} + +// -------------------------------------------------------- +// Point represents the geoJSON point type and it +// implements the index.GeoJSON interface. +type Point struct { + Typ string `json:"type"` + Vertices []float64 `json:"coordinates"` + s2point *s2.Point +} + +func (p *Point) Type() string { + return strings.ToLower(p.Typ) +} + +func (p *Point) Value() ([]byte, error) { + return jsoniter.Marshal(p) +} + +func NewGeoJsonPoint(v []float64) index.GeoJSON { + rv := &Point{Typ: PointType, Vertices: v} + rv.init() + return rv +} + +func (p *Point) init() { + if p.s2point == nil { + s2point := s2.PointFromLatLng(s2.LatLngFromDegrees( + p.Vertices[1], p.Vertices[0])) + p.s2point = &s2point + } +} + +func (p *Point) Marshal() ([]byte, error) { + p.init() + + var b bytes.Buffer + b.Grow(32) + w := bufio.NewWriter(&b) + err := p.s2point.Encode(w) + if err != nil { + return nil, err + } + + w.Flush() + return append([]byte{PointTypePrefix}, b.Bytes()...), nil +} + +func (p *Point) Intersects(other index.GeoJSON) (bool, error) { + p.init() + s2cell := s2.CellFromPoint(*p.s2point) + + return checkCellIntersectsShape(&s2cell, p, other) +} + +func (p *Point) Contains(other index.GeoJSON) (bool, error) { + p.init() + s2cell := s2.CellFromPoint(*p.s2point) + + return checkCellContainsShape([]*s2.Cell{&s2cell}, other) +} + +func (p *Point) Coordinates() []float64 { + return p.Vertices +} + +// -------------------------------------------------------- +// MultiPoint represents the geoJSON multipoint type and it +// implements the index.GeoJSON interface as well as the +// compositeShap interface. 
+type MultiPoint struct { + Typ string `json:"type"` + Vertices [][]float64 `json:"coordinates"` + s2points []*s2.Point +} + +func NewGeoJsonMultiPoint(v [][]float64) index.GeoJSON { + rv := &MultiPoint{Typ: MultiPointType, Vertices: v} + rv.init() + return rv +} + +func (mp *MultiPoint) init() { + if mp.s2points == nil { + mp.s2points = make([]*s2.Point, len(mp.Vertices)) + for i, point := range mp.Vertices { + s2point := s2.PointFromLatLng(s2.LatLngFromDegrees( + point[1], point[0])) + mp.s2points[i] = &s2point + } + } +} + +func (p *MultiPoint) Marshal() ([]byte, error) { + p.init() + + var b bytes.Buffer + b.Grow(64) + w := bufio.NewWriter(&b) + + // first write the number of points. + count := int32(len(p.s2points)) + err := binary.Write(w, binary.BigEndian, count) + if err != nil { + return nil, err + } + // write the points. + for _, s2point := range p.s2points { + err := s2point.Encode(w) + if err != nil { + return nil, err + } + } + + w.Flush() + return append([]byte{MultiPointTypePrefix}, b.Bytes()...), nil +} + +func (p *MultiPoint) Type() string { + return strings.ToLower(p.Typ) +} + +func (mp *MultiPoint) Value() ([]byte, error) { + return jsoniter.Marshal(mp) +} + +func (p *MultiPoint) Intersects(other index.GeoJSON) (bool, error) { + p.init() + + for _, s2point := range p.s2points { + cell := s2.CellFromPoint(*s2point) + rv, err := checkCellIntersectsShape(&cell, p, other) + if rv && err == nil { + return rv, nil + } + } + + return false, nil +} + +func (p *MultiPoint) Contains(other index.GeoJSON) (bool, error) { + p.init() + s2cells := make([]*s2.Cell, 0, len(p.s2points)) + + for _, s2point := range p.s2points { + cell := s2.CellFromPoint(*s2point) + s2cells = append(s2cells, &cell) + } + + return checkCellContainsShape(s2cells, other) +} + +func (p *MultiPoint) Coordinates() [][]float64 { + return p.Vertices +} + +func (p *MultiPoint) Members() []index.GeoJSON { + if len(p.Vertices) > 0 && len(p.s2points) == 0 { + points := make([]index.GeoJSON, len(p.Vertices)) + for pos, vertices := range p.Vertices { + points[pos] = NewGeoJsonPoint(vertices) + } + return points + } + + points := make([]index.GeoJSON, len(p.s2points)) + for pos, point := range p.s2points { + points[pos] = &Point{s2point: point} + } + return points +} + +// -------------------------------------------------------- +// LineString represents the geoJSON linestring type and it +// implements the index.GeoJSON interface. 
+type LineString struct { + Typ string `json:"type"` + Vertices [][]float64 `json:"coordinates"` + pl *s2.Polyline +} + +func NewGeoJsonLinestring(points [][]float64) index.GeoJSON { + rv := &LineString{Typ: LineStringType, Vertices: points} + rv.init() + return rv +} + +func (ls *LineString) init() { + if ls.pl == nil { + latlngs := make([]s2.LatLng, len(ls.Vertices)) + for i, vertex := range ls.Vertices { + latlngs[i] = s2.LatLngFromDegrees(vertex[1], vertex[0]) + } + ls.pl = s2.PolylineFromLatLngs(latlngs) + } +} + +func (ls *LineString) Type() string { + return strings.ToLower(ls.Typ) +} + +func (ls *LineString) Value() ([]byte, error) { + return jsoniter.Marshal(ls) +} + +func (ls *LineString) Marshal() ([]byte, error) { + ls.init() + + var b bytes.Buffer + b.Grow(50) + w := bufio.NewWriter(&b) + err := ls.pl.Encode(w) + if err != nil { + return nil, err + } + + w.Flush() + return append([]byte{LineStringTypePrefix}, b.Bytes()...), nil +} + +func (ls *LineString) Intersects(other index.GeoJSON) (bool, error) { + ls.init() + + return checkLineStringsIntersectsShape([]*s2.Polyline{ls.pl}, ls, other) +} + +func (ls *LineString) Contains(other index.GeoJSON) (bool, error) { + return checkLineStringsContainsShape([]*s2.Polyline{ls.pl}, other) +} + +func (ls *LineString) Coordinates() [][]float64 { + return ls.Vertices +} + +// -------------------------------------------------------- +// MultiLineString represents the geoJSON multilinestring type +// and it implements the index.GeoJSON interface as well as the +// compositeShap interface. +type MultiLineString struct { + Typ string `json:"type"` + Vertices [][][]float64 `json:"coordinates"` + pls []*s2.Polyline +} + +func NewGeoJsonMultilinestring(points [][][]float64) index.GeoJSON { + rv := &MultiLineString{Typ: MultiLineStringType, Vertices: points} + rv.init() + return rv +} + +func (mls *MultiLineString) init() { + if mls.pls == nil { + mls.pls = s2PolylinesFromCoordinates(mls.Vertices) + } +} + +func (mls *MultiLineString) Type() string { + return strings.ToLower(mls.Typ) +} + +func (mls *MultiLineString) Value() ([]byte, error) { + return jsoniter.Marshal(mls) +} + +func (mls *MultiLineString) Marshal() ([]byte, error) { + mls.init() + + var b bytes.Buffer + b.Grow(256) + w := bufio.NewWriter(&b) + + // first write the number of linestrings. + count := int32(len(mls.pls)) + err := binary.Write(w, binary.BigEndian, count) + if err != nil { + return nil, err + } + // write the lines. 
+	for _, ls := range mls.pls {
+		err := ls.Encode(w)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	w.Flush()
+	return append([]byte{MultiLineStringTypePrefix}, b.Bytes()...), nil
+}
+
+func (p *MultiLineString) Intersects(other index.GeoJSON) (bool, error) {
+	p.init()
+	return checkLineStringsIntersectsShape(p.pls, p, other)
+}
+
+func (p *MultiLineString) Contains(other index.GeoJSON) (bool, error) {
+	return checkLineStringsContainsShape(p.pls, other)
+}
+
+func (p *MultiLineString) Coordinates() [][][]float64 {
+	return p.Vertices
+}
+
+func (p *MultiLineString) Members() []index.GeoJSON {
+	if len(p.Vertices) > 0 && len(p.pls) == 0 {
+		lines := make([]index.GeoJSON, len(p.Vertices))
+		for pos, vertices := range p.Vertices {
+			lines[pos] = NewGeoJsonLinestring(vertices)
+		}
+		return lines
+	}
+
+	lines := make([]index.GeoJSON, len(p.pls))
+	for pos, pl := range p.pls {
+		lines[pos] = &LineString{pl: pl}
+	}
+	return lines
+}
+
+// --------------------------------------------------------
+// Polygon represents the geoJSON polygon type
+// and it implements the index.GeoJSON interface.
+type Polygon struct {
+	Typ      string        `json:"type"`
+	Vertices [][][]float64 `json:"coordinates"`
+	s2pgn    *s2.Polygon
+}
+
+func NewGeoJsonPolygon(points [][][]float64) index.GeoJSON {
+	rv := &Polygon{Typ: PolygonType, Vertices: points}
+	rv.init()
+	return rv
+}
+
+func (p *Polygon) init() {
+	if p.s2pgn == nil {
+		p.s2pgn = s2PolygonFromCoordinates(p.Vertices)
+	}
+}
+
+func (p *Polygon) Type() string {
+	return strings.ToLower(p.Typ)
+}
+
+func (p *Polygon) Value() ([]byte, error) {
+	return jsoniter.Marshal(p)
+}
+
+func (p *Polygon) Marshal() ([]byte, error) {
+	p.init()
+
+	var b bytes.Buffer
+	b.Grow(128)
+	w := bufio.NewWriter(&b)
+	err := p.s2pgn.Encode(w)
+	if err != nil {
+		return nil, err
+	}
+
+	w.Flush()
+	return append([]byte{PolygonTypePrefix}, b.Bytes()...), nil
+}
+
+func (p *Polygon) Intersects(other index.GeoJSON) (bool, error) {
+	// make an s2polygon for reuse.
+	p.init()
+
+	return checkPolygonIntersectsShape(p.s2pgn, p, other)
+}
+
+func (p *Polygon) Contains(other index.GeoJSON) (bool, error) {
+	// make an s2polygon for reuse.
+	p.init()
+
+	return checkMultiPolygonContainsShape([]*s2.Polygon{p.s2pgn}, p, other)
+}
+
+func (p *Polygon) Coordinates() [][][]float64 {
+	return p.Vertices
+}
+
+// --------------------------------------------------------
+// MultiPolygon represents the geoJSON multipolygon type
+// and it implements the index.GeoJSON interface as well as the
+// compositeShape interface.
+type MultiPolygon struct {
+	Typ      string          `json:"type"`
+	Vertices [][][][]float64 `json:"coordinates"`
+	s2pgns   []*s2.Polygon
+}
+
+func NewGeoJsonMultiPolygon(points [][][][]float64) index.GeoJSON {
+	rv := &MultiPolygon{Typ: MultiPolygonType, Vertices: points}
+	rv.init()
+	return rv
+}
+
+func (p *MultiPolygon) init() {
+	if p.s2pgns == nil {
+		p.s2pgns = make([]*s2.Polygon, len(p.Vertices))
+		for i, vertices := range p.Vertices {
+			pgn := s2PolygonFromCoordinates(vertices)
+			p.s2pgns[i] = pgn
+		}
+	}
+}
+
+func (p *MultiPolygon) Type() string {
+	return strings.ToLower(p.Typ)
+}
+
+func (p *MultiPolygon) Value() ([]byte, error) {
+	return jsoniter.Marshal(p)
+}
+
+func (p *MultiPolygon) Marshal() ([]byte, error) {
+	p.init()
+
+	var b bytes.Buffer
+	b.Grow(512)
+	w := bufio.NewWriter(&b)
+
+	// first write the number of polygons.
+	count := int32(len(p.s2pgns))
+	err := binary.Write(w, binary.BigEndian, count)
+	if err != nil {
+		return nil, err
+	}
+	// write the polygons.
+	for _, pgn := range p.s2pgns {
+		err := pgn.Encode(w)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	w.Flush()
+	return append([]byte{MultiPolygonTypePrefix}, b.Bytes()...), nil
+}
+
+func (p *MultiPolygon) Intersects(other index.GeoJSON) (bool, error) {
+	p.init()
+
+	for _, pgn := range p.s2pgns {
+		rv, err := checkPolygonIntersectsShape(pgn, p, other)
+		if rv && err == nil {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (p *MultiPolygon) Contains(other index.GeoJSON) (bool, error) {
+	p.init()
+
+	return checkMultiPolygonContainsShape(p.s2pgns, p, other)
+}
+
+func (p *MultiPolygon) Coordinates() [][][][]float64 {
+	return p.Vertices
+}
+
+func (p *MultiPolygon) Members() []index.GeoJSON {
+	if len(p.Vertices) > 0 && len(p.s2pgns) == 0 {
+		polygons := make([]index.GeoJSON, len(p.Vertices))
+		for pos, vertices := range p.Vertices {
+			polygons[pos] = NewGeoJsonPolygon(vertices)
+		}
+		return polygons
+	}
+
+	polygons := make([]index.GeoJSON, len(p.s2pgns))
+	for pos, pgn := range p.s2pgns {
+		polygons[pos] = &Polygon{s2pgn: pgn}
+	}
+	return polygons
+}
+
+// --------------------------------------------------------
+// GeometryCollection represents the geoJSON geometryCollection type
+// and it implements the index.GeoJSON interface as well as the
+// compositeShape interface.
+type GeometryCollection struct {
+	Typ    string          `json:"type"`
+	Shapes []index.GeoJSON `json:"geometries"`
+}
+
+func (gc *GeometryCollection) Type() string {
+	return strings.ToLower(gc.Typ)
+}
+
+func (gc *GeometryCollection) Value() ([]byte, error) {
+	return jsoniter.Marshal(gc)
+}
+
+func (gc *GeometryCollection) Members() []index.GeoJSON {
+	shapes := make([]index.GeoJSON, 0, len(gc.Shapes))
+	for _, shape := range gc.Shapes {
+		if cs, ok := shape.(compositeShape); ok {
+			shapes = append(shapes, cs.Members()...)
+		} else {
+			shapes = append(shapes, shape)
+		}
+	}
+	return shapes
+}
+
+func (gc *GeometryCollection) Marshal() ([]byte, error) {
+	var b bytes.Buffer
+	b.Grow(512)
+	w := bufio.NewWriter(&b)
+
+	// first write the number of shapes.
+	count := int32(len(gc.Shapes))
+	err := binary.Write(w, binary.BigEndian, count)
+	if err != nil {
+		return nil, err
+	}
+
+	var res []byte
+	for _, shape := range gc.Shapes {
+		if s, ok := shape.(s2Serializable); ok {
+			sb, err := s.Marshal()
+			if err != nil {
+				return nil, err
+			}
+			// write the length of each shape.
+			err = binary.Write(w, binary.BigEndian, int32(len(sb)))
+			if err != nil {
+				return nil, err
+			}
+			// track the shape contents.
+			res = append(res, sb...)
+ } + } + w.Flush() + + return append([]byte{GeometryCollectionTypePrefix}, append(b.Bytes(), res...)...), nil +} + +func (gc *GeometryCollection) Intersects(other index.GeoJSON) (bool, error) { + for _, shape := range gc.Members() { + + intersects, err := shape.Intersects(other) + if intersects && err == nil { + return true, nil + } + } + return false, nil +} + +func (gc *GeometryCollection) Contains(other index.GeoJSON) (bool, error) { + // handle composite target shapes explicitly + if cs, ok := other.(compositeShape); ok { + otherShapes := cs.Members() + shapesFoundWithIn := make(map[int]struct{}) + + nextShape: + for pos, shapeInDoc := range otherShapes { + for _, shape := range gc.Members() { + within, err := shape.Contains(shapeInDoc) + if within && err == nil { + shapesFoundWithIn[pos] = struct{}{} + continue nextShape + } + } + } + + return len(shapesFoundWithIn) == len(otherShapes), nil + } + + for _, shape := range gc.Members() { + within, err := shape.Contains(other) + if within && err == nil { + return true, nil + } + } + + return false, nil +} + +func (gc *GeometryCollection) UnmarshalJSON(data []byte) error { + tmp := struct { + Typ string `json:"type"` + Shapes []json.RawMessage `json:"geometries"` + }{} + + err := jsoniter.Unmarshal(data, &tmp) + if err != nil { + return err + } + gc.Typ = tmp.Typ + + for _, shape := range tmp.Shapes { + var t map[string]interface{} + err := jsoniter.Unmarshal(shape, &t) + if err != nil { + return err + } + + var typ string + + if val, ok := t["type"]; ok { + typ = strings.ToLower(val.(string)) + } else { + continue + } + + switch typ { + case PointType: + var p Point + err := jsoniter.Unmarshal(shape, &p) + if err != nil { + return err + } + p.init() + gc.Shapes = append(gc.Shapes, &p) + + case MultiPointType: + var mp MultiPoint + err := jsoniter.Unmarshal(shape, &mp) + if err != nil { + return err + } + mp.init() + gc.Shapes = append(gc.Shapes, &mp) + + case LineStringType: + var ls LineString + err := jsoniter.Unmarshal(shape, &ls) + if err != nil { + return err + } + ls.init() + gc.Shapes = append(gc.Shapes, &ls) + + case MultiLineStringType: + var mls MultiLineString + err := jsoniter.Unmarshal(shape, &mls) + if err != nil { + return err + } + mls.init() + gc.Shapes = append(gc.Shapes, &mls) + + case PolygonType: + var pgn Polygon + err := jsoniter.Unmarshal(shape, &pgn) + if err != nil { + return err + } + pgn.init() + gc.Shapes = append(gc.Shapes, &pgn) + + case MultiPolygonType: + var pgn MultiPolygon + err := jsoniter.Unmarshal(shape, &pgn) + if err != nil { + return err + } + pgn.init() + gc.Shapes = append(gc.Shapes, &pgn) + } + } + + return nil +} + +// -------------------------------------------------------- +// Circle represents a custom circle type and it +// implements the index.GeoJSON interface. 
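+//
+// A minimal usage sketch (illustrative values; the radius is any
+// distance string accepted by ParseDistance, and the constructor
+// returns nil if that string fails to parse):
+//
+//	c := NewGeoCircle([]float64{-2.65, 51.45}, "100m")
+//	ok, err := c.Contains(NewGeoJsonPoint([]float64{-2.65, 51.45}))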
+type Circle struct { + Typ string `json:"type"` + Vertices []float64 `json:"coordinates"` + Radius string `json:"radius"` + radiusInMeters float64 + s2cap *s2.Cap +} + +func NewGeoCircle(points []float64, + radius string) index.GeoJSON { + r, err := ParseDistance(radius) + if err != nil { + return nil + } + + return &Circle{Typ: CircleType, + Vertices: points, + Radius: radius, + radiusInMeters: r} +} + +func (c *Circle) Type() string { + return strings.ToLower(c.Typ) +} + +func (c *Circle) Value() ([]byte, error) { + return jsoniter.Marshal(c) +} + +func (c *Circle) init() { + if c.s2cap == nil { + c.s2cap = s2Cap(c.Vertices, c.radiusInMeters) + } +} + +func (c *Circle) Marshal() ([]byte, error) { + c.init() + + var b bytes.Buffer + b.Grow(40) + w := bufio.NewWriter(&b) + err := c.s2cap.Encode(w) + if err != nil { + return nil, err + } + + w.Flush() + return append([]byte{CircleTypePrefix}, b.Bytes()...), nil +} + +func (c *Circle) Intersects(other index.GeoJSON) (bool, error) { + c.init() + + return checkCircleIntersectsShape(c.s2cap, c, other) +} + +func (c *Circle) Contains(other index.GeoJSON) (bool, error) { + c.init() + return checkCircleContainsShape(c.s2cap, c, other) +} + +func (c *Circle) UnmarshalJSON(data []byte) error { + tmp := struct { + Typ string `json:"type"` + Vertices []float64 `json:"coordinates"` + Radius string `json:"radius"` + }{} + + err := jsoniter.Unmarshal(data, &tmp) + if err != nil { + return err + } + c.Typ = tmp.Typ + c.Vertices = tmp.Vertices + c.Radius = tmp.Radius + if tmp.Radius != "" { + c.radiusInMeters, err = ParseDistance(tmp.Radius) + } + + return err +} + +// -------------------------------------------------------- +// Envelope represents the envelope/bounding box type and it +// implements the index.GeoJSON interface. +type Envelope struct { + Typ string `json:"type"` + Vertices [][]float64 `json:"coordinates"` + r *s2.Rect +} + +func NewGeoEnvelope(points [][]float64) index.GeoJSON { + return &Envelope{Vertices: points, Typ: EnvelopeType} +} + +func (e *Envelope) Type() string { + return strings.ToLower(e.Typ) +} + +func (e *Envelope) Value() ([]byte, error) { + return jsoniter.Marshal(e) +} + +func (e *Envelope) init() { + if e.r == nil { + e.r = s2RectFromBounds(e.Vertices[0], e.Vertices[1]) + } +} + +func (e *Envelope) Marshal() ([]byte, error) { + e.init() + + var b bytes.Buffer + b.Grow(50) + w := bufio.NewWriter(&b) + err := e.r.Encode(w) + if err != nil { + return nil, err + } + + w.Flush() + return append([]byte{EnvelopeTypePrefix}, b.Bytes()...), nil +} + +func (e *Envelope) Intersects(other index.GeoJSON) (bool, error) { + e.init() + + return checkEnvelopeIntersectsShape(e.r, e, other) +} + +func (e *Envelope) Contains(other index.GeoJSON) (bool, error) { + e.init() + + return checkEnvelopeContainsShape(e.r, e, other) +} + +//-------------------------------------------------------- + +// checkCellIntersectsShape checks for intersection between +// the s2cell and the shape in the document. +func checkCellIntersectsShape(cell *s2.Cell, shapeIn, + other index.GeoJSON) (bool, error) { + // check if the other shape is a point. + if p2, ok := other.(*Point); ok { + s2cell := s2.CellFromPoint(*p2.s2point) + + if cell.IntersectsCell(s2cell) { + return true, nil + } + + return false, nil + } + + // check if the other shape is a multipoint. + if p2, ok := other.(*MultiPoint); ok { + // check the intersection for any point in the array. 
+		for _, point := range p2.s2points {
+			s2cell := s2.CellFromPoint(*point)
+
+			if cell.IntersectsCell(s2cell) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a polygon.
+	if p2, ok := other.(*Polygon); ok {
+
+		if p2.s2pgn.IntersectsCell(*cell) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipolygon.
+	if p2, ok := other.(*MultiPolygon); ok {
+		// check the intersection for any polygon in the collection.
+		for _, s2pgn := range p2.s2pgns {
+
+			if s2pgn.IntersectsCell(*cell) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a linestring.
+	if p2, ok := other.(*LineString); ok {
+		for i := 0; i < p2.pl.NumEdges(); i++ {
+			edge := p2.pl.Edge(i)
+			start := s2.CellFromPoint(edge.V0)
+			end := s2.CellFromPoint(edge.V1)
+			if cell.IntersectsCell(start) || cell.IntersectsCell(end) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multilinestring.
+	if p2, ok := other.(*MultiLineString); ok {
+		// check the intersection for any linestring in the array.
+		for _, pl := range p2.pls {
+			for i := 0; i < pl.NumEdges(); i++ {
+				edge := pl.Edge(i)
+				start := s2.CellFromPoint(edge.V0)
+				end := s2.CellFromPoint(edge.V1)
+				if cell.IntersectsCell(start) || cell.IntersectsCell(end) {
+					return true, nil
+				}
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a geometrycollection.
+	if gc, ok := other.(*GeometryCollection); ok {
+		// check for intersection across every member shape.
+		if geometryCollectionIntersectsShape(gc, shapeIn) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a circle.
+	if c, ok := other.(*Circle); ok {
+
+		if c.s2cap.IntersectsCell(*cell) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is an envelope.
+	if e, ok := other.(*Envelope); ok {
+
+		if e.r.IntersectsCell(*cell) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	return false, fmt.Errorf("unknown geojson type: %s "+
+		"found in document", other.Type())
+}
+
+// checkCellContainsShape checks whether the given shape
+// in the document is contained within the given s2cells.
+func checkCellContainsShape(cells []*s2.Cell,
+	other index.GeoJSON) (bool, error) {
+	// check if the other shape is a point.
+	if p2, ok := other.(*Point); ok {
+		for _, cell := range cells {
+
+			if cell.ContainsPoint(*p2.s2point) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipoint, if so containment is
+	// checked for every point in the multipoint with every given cell.
+	if p2, ok := other.(*MultiPoint); ok {
+		// check the containment for every point in the collection.
+		lookup := make(map[int]struct{})
+		for _, cell := range cells {
+			for pos, point := range p2.s2points {
+				if _, done := lookup[pos]; done {
+					continue
+				}
+				// already processed all the points in the multipoint.
+				if len(lookup) == len(p2.s2points) {
+					return true, nil
+				}
+
+				if cell.ContainsPoint(*point) {
+					lookup[pos] = struct{}{}
+				}
+			}
+		}
+
+		return len(lookup) == len(p2.s2points), nil
+	}
+
+	// as a point is a non-closed shape, containment isn't feasible
+	// for other higher dimensions.
+	return false, nil
+}
+
+// ------------------------------------------------------------------------
+
+// checkLineStringsIntersectsShape checks whether the given linestrings
+// intersect with the shape in the document.
+func checkLineStringsIntersectsShape(pls []*s2.Polyline, shapeIn,
+	other index.GeoJSON) (bool, error) {
+	// check if the other shape is a point.
+	if p2, ok := other.(*Point); ok {
+		if polylineIntersectsPoint(pls, p2.s2point) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipoint.
+	if p2, ok := other.(*MultiPoint); ok {
+		// check the intersection for any point in the collection.
+		for _, point := range p2.s2points {
+
+			if polylineIntersectsPoint(pls, point) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a polygon.
+	if p2, ok := other.(*Polygon); ok {
+		if polylineIntersectsPolygons(pls, []*s2.Polygon{p2.s2pgn}) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipolygon.
+	if p2, ok := other.(*MultiPolygon); ok {
+		// check the intersection for any polygon in the collection.
+		if polylineIntersectsPolygons(pls, p2.s2pgns) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a linestring.
+	if ls, ok := other.(*LineString); ok {
+		for _, pl := range pls {
+			if ls.pl.Intersects(pl) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multilinestring.
+	if mls, ok := other.(*MultiLineString); ok {
+		for _, ls := range pls {
+			for _, docLineString := range mls.pls {
+				if ls.Intersects(docLineString) {
+					return true, nil
+				}
+			}
+		}
+
+		return false, nil
+	}
+
+	if gc, ok := other.(*GeometryCollection); ok {
+		// check whether the linestrings intersect with any of the
+		// member shapes of the geometrycollection.
+		if geometryCollectionIntersectsShape(gc, shapeIn) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a circle.
+	if c, ok := other.(*Circle); ok {
+		centre := c.s2cap.Center()
+		for _, pl := range pls {
+			for i := 0; i < pl.NumEdges(); i++ {
+				edge := pl.Edge(i)
+				distance := s2.DistanceFromSegment(centre, edge.V0, edge.V1)
+				if distance <= c.s2cap.Radius() {
+					return true, nil
+				}
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is an envelope.
+	if e, ok := other.(*Envelope); ok {
+		res := rectangleIntersectsWithLineStrings(e.r, pls)
+
+		return res, nil
+	}
+
+	return false, fmt.Errorf("unknown geojson type: %s "+
+		"found in document", other.Type())
+}
+
+// checkLineStringsContainsShape checks the containment of the
+// shape in the document within the given linestrings. As linestrings
+// are not closed shapes, they cannot contain any other shape, so
+// this always returns false.
+func checkLineStringsContainsShape(pls []*s2.Polyline,
+	other index.GeoJSON) (bool, error) {
+	return false, nil
+}
+
+// ------------------------------------------------------------------------
+
+// checkPolygonIntersectsShape checks the intersection between the
+// s2 polygon and the other shapes in the document.
+func checkPolygonIntersectsShape(s2pgn *s2.Polygon, shapeIn,
+	other index.GeoJSON) (bool, error) {
+	// check if the other shape is a point.
+	if p2, ok := other.(*Point); ok {
+
+		s2cell := s2.CellFromPoint(*p2.s2point)
+		if s2pgn.IntersectsCell(s2cell) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipoint.
+	if p2, ok := other.(*MultiPoint); ok {
+
+		for _, s2point := range p2.s2points {
+			s2cell := s2.CellFromPoint(*s2point)
+			if s2pgn.IntersectsCell(s2cell) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a polygon.
+	if p2, ok := other.(*Polygon); ok {
+
+		if s2pgn.Intersects(p2.s2pgn) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipolygon.
+	if p2, ok := other.(*MultiPolygon); ok {
+		// check the intersection for any polygon in the collection.
+		for _, s2pgn1 := range p2.s2pgns {
+
+			if s2pgn.Intersects(s2pgn1) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a linestring.
+	if ls, ok := other.(*LineString); ok {
+
+		if polylineIntersectsPolygons([]*s2.Polyline{ls.pl},
+			[]*s2.Polygon{s2pgn}) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multilinestring.
+	if mls, ok := other.(*MultiLineString); ok {
+
+		if polylineIntersectsPolygons(mls.pls, []*s2.Polygon{s2pgn}) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	if gc, ok := other.(*GeometryCollection); ok {
+		// check whether the polygon intersects with any of the
+		// member shapes of the geometry collection.
+		if geometryCollectionIntersectsShape(gc, shapeIn) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a circle.
+	if c, ok := other.(*Circle); ok {
+		cp := c.s2cap.Center()
+		radius := c.s2cap.Radius()
+
+		projected := s2pgn.Project(&cp)
+		distance := projected.Distance(cp)
+
+		return distance <= radius, nil
+	}
+
+	// check if the other shape is an envelope.
+	if e, ok := other.(*Envelope); ok {
+
+		s2pgnInDoc := s2PolygonFromS2Rectangle(e.r)
+		if s2pgn.Intersects(s2pgnInDoc) {
+			return true, nil
+		}
+		return false, nil
+	}
+
+	return false, fmt.Errorf("unknown geojson type: %s "+
+		"found in document", other.Type())
+}
+
+// checkMultiPolygonContainsShape checks whether the given polygons
+// collectively contain the shape in the document.
+func checkMultiPolygonContainsShape(s2pgns []*s2.Polygon,
+	shapeIn, other index.GeoJSON) (bool, error) {
+	// check if the other shape is a point.
+	if p2, ok := other.(*Point); ok {
+
+		for _, s2pgn := range s2pgns {
+			if s2pgn.ContainsPoint(*p2.s2point) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipoint.
+	if p2, ok := other.(*MultiPoint); ok {
+		// check the containment for every point in the collection.
+		pointsWithIn := make(map[int]struct{})
+	nextPoint:
+		for pointIndex, point := range p2.s2points {
+
+			for _, s2pgn := range s2pgns {
+				if s2pgn.ContainsPoint(*point) {
+					pointsWithIn[pointIndex] = struct{}{}
+					continue nextPoint
+				} else {
+					// double check for containment with the vertices.
+					for _, loop := range s2pgn.Loops() {
+						for i := 0; i < loop.NumVertices(); i++ {
+							if point.ApproxEqual(loop.Vertex(i)) {
+								pointsWithIn[pointIndex] = struct{}{}
+								continue nextPoint
+							}
+						}
+					}
+				}
+			}
+		}
+
+		return len(p2.s2points) == len(pointsWithIn), nil
+	}
+
+	// check if the other shape is a polygon.
+	if p2, ok := other.(*Polygon); ok {
+
+		for _, s2pgn := range s2pgns {
+			if s2pgn.Contains(p2.s2pgn) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipolygon.
+	if p2, ok := other.(*MultiPolygon); ok {
+		// check the containment for every polygon in the collection.
+		polygonsWithIn := make(map[int]struct{})
+
+	nextPolygon:
+		for pgnIndex, pgn := range p2.s2pgns {
+			for _, s2pgn := range s2pgns {
+				if s2pgn.Contains(pgn) {
+					polygonsWithIn[pgnIndex] = struct{}{}
+					continue nextPolygon
+				}
+			}
+		}
+
+		return len(p2.s2pgns) == len(polygonsWithIn), nil
+	}
+
+	// check if the other shape is a linestring.
+	if ls, ok := other.(*LineString); ok {
+
+		if polygonsContainsLineStrings(s2pgns,
+			[]*s2.Polyline{ls.pl}) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multilinestring.
+	if mls, ok := other.(*MultiLineString); ok {
+		// check whether the linestrings are inside the polygons.
+		if polygonsContainsLineStrings(s2pgns, mls.pls) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	if gc, ok := other.(*GeometryCollection); ok {
+		shapesWithIn := make(map[int]struct{})
+	nextShape:
+		for pos, shape := range gc.Members() {
+			for _, s2pgn := range s2pgns {
+				contains, err := checkMultiPolygonContainsShape(
+					[]*s2.Polygon{s2pgn}, shapeIn, shape)
+				if err == nil && contains {
+					shapesWithIn[pos] = struct{}{}
+					continue nextShape
+				}
+			}
+		}
+		return len(shapesWithIn) == len(gc.Members()), nil
+	}
+
+	// check if the other shape is a circle.
+	if c, ok := other.(*Circle); ok {
+		cp := c.s2cap.Center()
+		radius := c.s2cap.Radius()
+
+		for _, s2pgn := range s2pgns {
+
+			if s2pgn.ContainsPoint(cp) {
+				projected := s2pgn.ProjectToBoundary(&cp)
+				distance := projected.Distance(cp)
+				if distance >= radius {
+					return true, nil
+				}
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is an envelope.
+	if e, ok := other.(*Envelope); ok {
+		// create a polygon from the rectangle and check the containment.
+		s2pgnInDoc := s2PolygonFromS2Rectangle(e.r)
+		for _, s2pgn := range s2pgns {
+			if s2pgn.Contains(s2pgnInDoc) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	return false, fmt.Errorf("unknown geojson type: %s"+
+		" found in document", other.Type())
+}
+
+// ------------------------------------------------------------------------
+
+// checkCircleIntersectsShape checks for intersection of the
+// shape in the document with the circle.
+func checkCircleIntersectsShape(s2cap *s2.Cap, shapeIn,
+	other index.GeoJSON) (bool, error) {
+	// check if the other shape is a point.
+	if p2, ok := other.(*Point); ok {
+		s2cell := s2.CellFromPoint(*p2.s2point)
+
+		if s2cap.IntersectsCell(s2cell) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipoint.
+	if p2, ok := other.(*MultiPoint); ok {
+		// check the intersection for any point in the collection.
+		for _, point := range p2.s2points {
+			s2cell := s2.CellFromPoint(*point)
+
+			if s2cap.IntersectsCell(s2cell) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a polygon.
+	if p2, ok := other.(*Polygon); ok {
+		centerPoint := s2cap.Center()
+		projected := p2.s2pgn.Project(&centerPoint)
+		distance := projected.Distance(centerPoint)
+		return distance <= s2cap.Radius(), nil
+	}
+
+	// check if the other shape is a multipolygon.
+	if p2, ok := other.(*MultiPolygon); ok {
+		// check the intersection for any polygon in the collection.
+		for _, s2pgn := range p2.s2pgns {
+			centerPoint := s2cap.Center()
+			projected := s2pgn.Project(&centerPoint)
+			distance := projected.Distance(centerPoint)
+			if distance <= s2cap.Radius() {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a linestring.
+	if p2, ok := other.(*LineString); ok {
+		projected, _ := p2.pl.Project(s2cap.Center())
+		distance := projected.Distance(s2cap.Center())
+		return distance <= s2cap.Radius(), nil
+	}
+
+	// check if the other shape is a multilinestring.
+	if p2, ok := other.(*MultiLineString); ok {
+		for _, pl := range p2.pls {
+			projected, _ := pl.Project(s2cap.Center())
+			distance := projected.Distance(s2cap.Center())
+			if distance <= s2cap.Radius() {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	if gc, ok := other.(*GeometryCollection); ok {
+		// check whether the circle intersects with any of the
+		// member shapes of the geometrycollection.
+		if geometryCollectionIntersectsShape(gc, shapeIn) {
+			return true, nil
+		}
+		return false, nil
+	}
+
+	// check if the other shape is a circle.
+	if c, ok := other.(*Circle); ok {
+		if s2cap.Intersects(*c.s2cap) {
+			return true, nil
+		}
+		return false, nil
+	}
+
+	// check if the other shape is an envelope.
+	if e, ok := other.(*Envelope); ok {
+
+		if e.r.ContainsPoint(s2cap.Center()) {
+			return true, nil
+		}
+
+		latlngs := []s2.LatLng{e.r.Vertex(0), e.r.Vertex(1),
+			e.r.Vertex(2), e.r.Vertex(3), e.r.Vertex(0)}
+		pl := s2.PolylineFromLatLngs(latlngs)
+		projected, _ := pl.Project(s2cap.Center())
+		distance := projected.Distance(s2cap.Center())
+		if distance <= s2cap.Radius() {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	return false, fmt.Errorf("unknown geojson type: %s"+
+		" found in document", other.Type())
+}
+
+// checkCircleContainsShape checks for containment of the
+// shape in the document within the circle.
+func checkCircleContainsShape(s2cap *s2.Cap,
+	shapeIn, other index.GeoJSON) (bool, error) {
+	// check if the other shape is a point.
+	if p2, ok := other.(*Point); ok {
+
+		if s2cap.ContainsPoint(*p2.s2point) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipoint.
+	if p2, ok := other.(*MultiPoint); ok {
+		// check the containment for every point in the collection.
+		for _, point := range p2.s2points {
+			if !s2cap.ContainsPoint(*point) {
+				return false, nil
+			}
+		}
+
+		return true, nil
+	}
+
+	// check if the other shape is a polygon.
+	if p2, ok := other.(*Polygon); ok {
+		for i := 0; i < p2.s2pgn.NumEdges(); i++ {
+			edge := p2.s2pgn.Edge(i)
+			if !s2cap.ContainsPoint(edge.V0) ||
+				!s2cap.ContainsPoint(edge.V1) {
+				return false, nil
+			}
+		}
+		return true, nil
+	}
+
+	// check if the other shape is a multipolygon.
+	if p2, ok := other.(*MultiPolygon); ok {
+		// check the containment for every polygon in the collection.
+		for _, s2pgn := range p2.s2pgns {
+			for i := 0; i < s2pgn.NumEdges(); i++ {
+				edge := s2pgn.Edge(i)
+				if !s2cap.ContainsPoint(edge.V0) ||
+					!s2cap.ContainsPoint(edge.V1) {
+					return false, nil
+				}
+			}
+		}
+
+		return true, nil
+	}
+
+	// check if the other shape is a linestring.
+	if p2, ok := other.(*LineString); ok {
+		for i := 0; i < p2.pl.NumEdges(); i++ {
+			edge := p2.pl.Edge(i)
+			// check whether both the end vertices are inside the circle.
+			if !s2cap.ContainsPoint(edge.V0) ||
+				!s2cap.ContainsPoint(edge.V1) {
+				return false, nil
+			}
+		}
+		return true, nil
+	}
+
+	// check if the other shape is a multilinestring.
+	if p2, ok := other.(*MultiLineString); ok {
+		for _, pl := range p2.pls {
+			for i := 0; i < pl.NumEdges(); i++ {
+				edge := pl.Edge(i)
+				// check whether both the end vertices are inside the circle.
+				if !(s2cap.ContainsPoint(edge.V0) && s2cap.ContainsPoint(edge.V1)) {
+					return false, nil
+				}
+			}
+		}
+		return true, nil
+	}
+
+	if gc, ok := other.(*GeometryCollection); ok {
+		for _, shape := range gc.Members() {
+			contains, err := shapeIn.Contains(shape)
+			if err == nil && !contains {
+				return false, nil
+			}
+		}
+		return true, nil
+	}
+
+	// check if the other shape is a circle.
+	if c, ok := other.(*Circle); ok {
+
+		if s2cap.Contains(*c.s2cap) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is an envelope.
+	if e, ok := other.(*Envelope); ok {
+
+		for i := 0; i < 4; i++ {
+			if !s2cap.ContainsPoint(
+				s2.PointFromLatLng(e.r.Vertex(i))) {
+				return false, nil
+			}
+		}
+
+		return true, nil
+	}
+
+	return false, fmt.Errorf("unknown geojson type: %s"+
+		" found in document", other.Type())
+}
+
+// ------------------------------------------------------------------------
+
+// checkEnvelopeIntersectsShape checks whether the given shape in
+// the document is intersecting with the envelope/rectangle.
+func checkEnvelopeIntersectsShape(s2rect *s2.Rect, shapeIn,
+	other index.GeoJSON) (bool, error) {
+	// check if the other shape is a point.
+	if p2, ok := other.(*Point); ok {
+		s2cell := s2.CellFromPoint(*p2.s2point)
+
+		if s2rect.IntersectsCell(s2cell) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipoint.
+	if p2, ok := other.(*MultiPoint); ok {
+		// check the intersection for any point in the collection.
+		for _, point := range p2.s2points {
+			s2cell := s2.CellFromPoint(*point)
+
+			if s2rect.IntersectsCell(s2cell) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a polygon.
+	if pgn, ok := other.(*Polygon); ok {
+
+		if rectangleIntersectsWithPolygons(s2rect,
+			[]*s2.Polygon{pgn.s2pgn}) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipolygon.
+	if mpgn, ok := other.(*MultiPolygon); ok {
+		// check the intersection for any polygon in the collection.
+		if rectangleIntersectsWithPolygons(s2rect, mpgn.s2pgns) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a linestring.
+	if ls, ok := other.(*LineString); ok {
+
+		if rectangleIntersectsWithLineStrings(s2rect,
+			[]*s2.Polyline{ls.pl}) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multilinestring.
+	if mls, ok := other.(*MultiLineString); ok {
+
+		if rectangleIntersectsWithLineStrings(s2rect, mls.pls) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	if gc, ok := other.(*GeometryCollection); ok {
+		// check for the intersection of every member shape
+		// within the geometrycollection.
+		if geometryCollectionIntersectsShape(gc, shapeIn) {
+			return true, nil
+		}
+		return false, nil
+	}
+
+	// check if the other shape is a circle.
+	if c, ok := other.(*Circle); ok {
+		s2pgn := s2PolygonFromS2Rectangle(s2rect)
+		cp := c.s2cap.Center()
+		projected := s2pgn.Project(&cp)
+		distance := projected.Distance(cp)
+		return distance <= c.s2cap.Radius(), nil
+	}
+
+	// check if the other shape is an envelope.
+	if e, ok := other.(*Envelope); ok {
+
+		if s2rect.Intersects(*e.r) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	return false, fmt.Errorf("unknown geojson type: %s"+
+		" found in document", other.Type())
+}
+
+// checkEnvelopeContainsShape checks whether the given shape in
+// the document is contained within the envelope/rectangle.
+func checkEnvelopeContainsShape(s2rect *s2.Rect, shapeIn,
+	other index.GeoJSON) (bool, error) {
+	// check if the other shape is a point.
+	if p2, ok := other.(*Point); ok {
+		s2LatLng := s2.LatLngFromPoint(*p2.s2point)
+
+		if s2rect.ContainsLatLng(s2LatLng) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is a multipoint.
+	if p2, ok := other.(*MultiPoint); ok {
+		// check the containment of every point in the collection.
+		for _, point := range p2.s2points {
+			s2LatLng := s2.LatLngFromPoint(*point)
+
+			if !s2rect.ContainsLatLng(s2LatLng) {
+				return false, nil
+			}
+		}
+
+		return true, nil
+	}
+
+	// check if the other shape is a polygon.
+	if p2, ok := other.(*Polygon); ok {
+		s2pgnRect := s2PolygonFromS2Rectangle(s2rect)
+		return s2pgnRect.Contains(p2.s2pgn), nil
+	}
+
+	// check if the other shape is a multipolygon.
+	if p2, ok := other.(*MultiPolygon); ok {
+		s2pgnRect := s2PolygonFromS2Rectangle(s2rect)
+
+		// check the containment for every polygon in the collection.
+		for _, s2pgn := range p2.s2pgns {
+			if !s2pgnRect.Contains(s2pgn) {
+				return false, nil
+			}
+		}
+
+		return true, nil
+	}
+
+	// check if the other shape is a linestring.
+	if p2, ok := other.(*LineString); ok {
+		for i := 0; i < p2.pl.NumEdges(); i++ {
+			edge := p2.pl.Edge(i)
+			if !s2rect.ContainsPoint(edge.V0) ||
+				!s2rect.ContainsPoint(edge.V1) {
+				return false, nil
+			}
+		}
+
+		return true, nil
+	}
+
+	// check if the other shape is a multilinestring.
+	if p2, ok := other.(*MultiLineString); ok {
+		for _, pl := range p2.pls {
+			for i := 0; i < pl.NumEdges(); i++ {
+				edge := pl.Edge(i)
+				if !s2rect.ContainsPoint(edge.V0) ||
+					!s2rect.ContainsPoint(edge.V1) {
+					return false, nil
+				}
+			}
+		}
+		return true, nil
+	}
+
+	if gc, ok := other.(*GeometryCollection); ok {
+		for _, shape := range gc.Members() {
+			contains, err := shapeIn.Contains(shape)
+			if err == nil && !contains {
+				return false, nil
+			}
+		}
+		return true, nil
+	}
+
+	// check if the other shape is a circle.
+	if c, ok := other.(*Circle); ok {
+
+		if s2rect.Contains(c.s2cap.RectBound()) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	// check if the other shape is an envelope.
+	if e, ok := other.(*Envelope); ok {
+
+		if s2rect.Contains(*e.r) {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	return false, fmt.Errorf("unknown geojson type: %s"+
+		" found in document", other.Type())
+}
diff --git a/vendor/github.com/blevesearch/geo/geojson/geojson_shapes_util.go b/vendor/github.com/blevesearch/geo/geojson/geojson_shapes_util.go
new file mode 100644
index 0000000..a62f685
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/geojson/geojson_shapes_util.go
@@ -0,0 +1,586 @@
+// Copyright (c) 2022 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package geojson
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"strings"
+
+	index "github.com/blevesearch/bleve_index_api"
+	"github.com/blevesearch/geo/s2"
+	jsoniterator "github.com/json-iterator/go"
+)
+
+var jsoniter = jsoniterator.ConfigCompatibleWithStandardLibrary
+
+// FilterGeoShapesOnRelation extracts the shapes in the document,
+// applies the `relation` filter and confirms whether the shape in
+// the document satisfies the given relation.
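+//
+// A sketch of the expected call pattern (names illustrative; the
+// *bytes.Reader is passed by reference so that it can be reused
+// across successive documents):
+//
+//	var r *bytes.Reader
+//	matched, err := FilterGeoShapesOnRelation(queryShape,
+//		docValueBytes, "intersects", &r)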
+func FilterGeoShapesOnRelation(shape index.GeoJSON, targetShapeBytes []byte,
+	relation string, reader **bytes.Reader) (bool, error) {
+
+	shapeInDoc, err := extractShapesFromBytes(targetShapeBytes, reader)
+	if err != nil {
+		return false, err
+	}
+
+	return filterShapes(shape, shapeInDoc, relation)
+}
+
+// extractShapesFromBytes unmarshals the bytes to retrieve the
+// embedded geojson shape.
+func extractShapesFromBytes(targetShapeBytes []byte, r **bytes.Reader) (
+	index.GeoJSON, error) {
+	if (*r) == nil {
+		*r = bytes.NewReader(targetShapeBytes[1:])
+	} else {
+		(*r).Reset(targetShapeBytes[1:])
+	}
+
+	switch targetShapeBytes[0] {
+	case PointTypePrefix:
+		point := &Point{s2point: &s2.Point{}}
+		err := point.s2point.Decode(*r)
+		if err != nil {
+			return nil, err
+		}
+		return point, nil
+
+	case MultiPointTypePrefix:
+		var numPoints int32
+		err := binary.Read(*r, binary.BigEndian, &numPoints)
+		if err != nil {
+			return nil, err
+		}
+		multipoint := &MultiPoint{
+			s2points: make([]*s2.Point, 0, numPoints),
+		}
+		for i := 0; i < int(numPoints); i++ {
+			s2point := s2.Point{}
+			err := s2point.Decode((*r))
+			if err != nil {
+				return nil, err
+			}
+			multipoint.s2points = append(multipoint.s2points, &s2point)
+		}
+
+		return multipoint, nil
+
+	case LineStringTypePrefix:
+		ls := &LineString{pl: &s2.Polyline{}}
+		err := ls.pl.Decode(*r)
+		if err != nil {
+			return nil, err
+		}
+		return ls, nil
+
+	case MultiLineStringTypePrefix:
+		var numLineStrings int32
+		err := binary.Read(*r, binary.BigEndian, &numLineStrings)
+		if err != nil {
+			return nil, err
+		}
+
+		mls := &MultiLineString{pls: make([]*s2.Polyline, 0, numLineStrings)}
+
+		for i := 0; i < int(numLineStrings); i++ {
+			pl := &s2.Polyline{}
+			err := pl.Decode(*r)
+			if err != nil {
+				return nil, err
+			}
+			mls.pls = append(mls.pls, pl)
+		}
+
+		return mls, nil
+
+	case PolygonTypePrefix:
+		pgn := &Polygon{s2pgn: &s2.Polygon{}}
+		err := pgn.s2pgn.Decode(*r)
+		if err != nil {
+			return nil, err
+		}
+
+		return pgn, nil
+
+	case MultiPolygonTypePrefix:
+		var numPolygons int32
+		err := binary.Read(*r, binary.BigEndian, &numPolygons)
+		if err != nil {
+			return nil, err
+		}
+		mpgns := &MultiPolygon{s2pgns: make([]*s2.Polygon, 0, numPolygons)}
+		for i := 0; i < int(numPolygons); i++ {
+			pgn := &s2.Polygon{}
+			err := pgn.Decode(*r)
+			if err != nil {
+				return nil, err
+			}
+			mpgns.s2pgns = append(mpgns.s2pgns, pgn)
+		}
+
+		return mpgns, nil
+
+	case GeometryCollectionTypePrefix:
+		var numShapes int32
+		err := binary.Read(*r, binary.BigEndian, &numShapes)
+		if err != nil {
+			return nil, err
+		}
+
+		lengths := make([]int32, numShapes)
+		for i := int32(0); i < numShapes; i++ {
+			var length int32
+			err := binary.Read(*r, binary.BigEndian, &length)
+			if err != nil {
+				return nil, err
+			}
+			lengths[i] = length
+		}
+
+		inputBytes := targetShapeBytes[len(targetShapeBytes)-(*r).Len():]
+		gc := &GeometryCollection{Shapes: make([]index.GeoJSON, numShapes)}
+
+		for i := int32(0); i < numShapes; i++ {
+			shape, err := extractShapesFromBytes(inputBytes[:lengths[i]], r)
+			if err != nil {
+				return nil, err
+			}
+
+			gc.Shapes[i] = shape
+			inputBytes = inputBytes[lengths[i]:]
+		}
+
+		return gc, nil
+
+	case CircleTypePrefix:
+		c := &Circle{s2cap: &s2.Cap{}}
+		err := c.s2cap.Decode(*r)
+		if err != nil {
+			return nil, err
+		}
+
+		return c, nil
+
+	case EnvelopeTypePrefix:
+		e := &Envelope{r: &s2.Rect{}}
+		err := e.r.Decode(*r)
+		if err != nil {
+			return nil, err
+		}
+
+		return e, nil
+	}
+
+	return nil, fmt.Errorf("unknown geo shape type: %v", targetShapeBytes[0])
+}
+
+// filterShapes applies the given relation between the query shape
+// and the shape in the document.
+func filterShapes(shape index.GeoJSON,
+	shapeInDoc index.GeoJSON, relation string) (bool, error) {
+
+	if relation == "intersects" {
+		return shape.Intersects(shapeInDoc)
+	}
+
+	if relation == "contains" {
+		return shapeInDoc.Contains(shape)
+	}
+
+	if relation == "within" {
+		return shape.Contains(shapeInDoc)
+	}
+
+	if relation == "disjoint" {
+		intersects, err := shape.Intersects(shapeInDoc)
+		return !intersects, err
+	}
+
+	return false, fmt.Errorf("unknown relation: %s", relation)
+}
+
+// ParseGeoJSONShape unmarshals the geojson/circle/envelope shape
+// embedded in the given bytes.
+func ParseGeoJSONShape(input []byte) (index.GeoJSON, error) {
+	var sType string
+	var tmp struct {
+		Typ string `json:"type"`
+	}
+	err := jsoniter.Unmarshal(input, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	sType = strings.ToLower(tmp.Typ)
+
+	switch sType {
+	case PolygonType:
+		var rv Polygon
+		err := jsoniter.Unmarshal(input, &rv)
+		if err != nil {
+			return nil, err
+		}
+		rv.init()
+		return &rv, nil
+
+	case MultiPolygonType:
+		var rv MultiPolygon
+		err := jsoniter.Unmarshal(input, &rv)
+		if err != nil {
+			return nil, err
+		}
+		rv.init()
+		return &rv, nil
+
+	case PointType:
+		var rv Point
+		err := jsoniter.Unmarshal(input, &rv)
+		if err != nil {
+			return nil, err
+		}
+		rv.init()
+		return &rv, nil
+
+	case MultiPointType:
+		var rv MultiPoint
+		err := jsoniter.Unmarshal(input, &rv)
+		if err != nil {
+			return nil, err
+		}
+		rv.init()
+		return &rv, nil
+
+	case LineStringType:
+		var rv LineString
+		err := jsoniter.Unmarshal(input, &rv)
+		if err != nil {
+			return nil, err
+		}
+		rv.init()
+		return &rv, nil
+
+	case MultiLineStringType:
+		var rv MultiLineString
+		err := jsoniter.Unmarshal(input, &rv)
+		if err != nil {
+			return nil, err
+		}
+		rv.init()
+		return &rv, nil
+
+	case GeometryCollectionType:
+		var rv GeometryCollection
+		err := jsoniter.Unmarshal(input, &rv)
+		if err != nil {
+			return nil, err
+		}
+		return &rv, nil
+
+	case CircleType:
+		var rv Circle
+		err := jsoniter.Unmarshal(input, &rv)
+		if err != nil {
+			return nil, err
+		}
+		rv.init()
+		return &rv, nil
+
+	case EnvelopeType:
+		var rv Envelope
+		err := jsoniter.Unmarshal(input, &rv)
+		if err != nil {
+			return nil, err
+		}
+		rv.init()
+		return &rv, nil
+	}
+
+	return nil, fmt.Errorf("unknown shape type: %s", sType)
+}
+
+// NewGeoJsonShape instantiates a geojson shape/circle or
+// an envelope from the given coordinates and type.
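+//
+// For example, a point uses the uniform four-level nesting of the
+// coordinates argument (illustrative values):
+//
+//	shape, encoded, err := NewGeoJsonShape(
+//		[][][][]float64{{{{-2.65, 51.45}}}}, PointType)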
+func NewGeoJsonShape(coordinates [][][][]float64, typ string) (
+	index.GeoJSON, []byte, error) {
+	if len(coordinates) == 0 {
+		return nil, nil, fmt.Errorf("missing coordinates")
+	}
+
+	typ = strings.ToLower(typ)
+
+	switch typ {
+	case PointType:
+		point := NewGeoJsonPoint(coordinates[0][0][0])
+		value, err := point.(s2Serializable).Marshal()
+		if err != nil {
+			return nil, nil, err
+		}
+		return point, value, nil
+
+	case MultiPointType:
+		multipoint := NewGeoJsonMultiPoint(coordinates[0][0])
+		value, err := multipoint.(s2Serializable).Marshal()
+		if err != nil {
+			return nil, nil, err
+		}
+		return multipoint, value, nil
+
+	case LineStringType:
+		linestring := NewGeoJsonLinestring(coordinates[0][0])
+		value, err := linestring.(s2Serializable).Marshal()
+		if err != nil {
+			return nil, nil, err
+		}
+		return linestring, value, nil
+
+	case MultiLineStringType:
+		multilinestring := NewGeoJsonMultilinestring(coordinates[0])
+		value, err := multilinestring.(s2Serializable).Marshal()
+		if err != nil {
+			return nil, nil, err
+		}
+		return multilinestring, value, nil
+
+	case PolygonType:
+		polygon := NewGeoJsonPolygon(coordinates[0])
+		value, err := polygon.(s2Serializable).Marshal()
+		if err != nil {
+			return nil, nil, err
+		}
+		return polygon, value, nil
+
+	case MultiPolygonType:
+		multipolygon := NewGeoJsonMultiPolygon(coordinates)
+		value, err := multipolygon.(s2Serializable).Marshal()
+		if err != nil {
+			return nil, nil, err
+		}
+		return multipolygon, value, nil
+
+	case EnvelopeType:
+		envelope := NewGeoEnvelope(coordinates[0][0])
+		value, err := envelope.(s2Serializable).Marshal()
+		if err != nil {
+			return nil, nil, err
+		}
+		return envelope, value, nil
+	}
+
+	return nil, nil, fmt.Errorf("unknown shape type: %s", typ)
+}
+
+// GlueBytes is primarily for quicker filtering of docvalues
+// during the filtering phase.
+var GlueBytes = []byte("##")
+
+// NewGeometryCollection instantiates a geometrycollection
+// and prefixes the byte contents with certain glue bytes that
+// can be used later while filtering the doc values.
+func NewGeometryCollection(coordinates [][][][][]float64,
+	typs []string) (index.GeoJSON, []byte, error) {
+	if typs == nil {
+		return nil, nil, fmt.Errorf("nil type information")
+	}
+	if len(typs) < len(coordinates) {
+		return nil, nil, fmt.Errorf("missing type information for some shapes")
+	}
+	shapes := make([]index.GeoJSON, 0, len(coordinates))
+	for i, vertices := range coordinates {
+		s, _, err := NewGeoJsonShape(vertices, typs[i])
+		if err != nil {
+			continue
+		}
+		shapes = append(shapes, s)
+	}
+
+	var gc GeometryCollection
+	gc.Typ = GeometryCollectionType
+	gc.Shapes = shapes
+	vbytes, err := gc.Marshal()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &gc, vbytes, nil
+}
+
+// NewGeoCircleShape instantiates a circle shape and
+// prefixes the byte contents with certain glue bytes that
+// can be used later while filtering the doc values.
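+//
+// For example (illustrative center and radius):
+//
+//	c, encoded, err := NewGeoCircleShape([]float64{-2.65, 51.45}, "1km")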
+func NewGeoCircleShape(cp []float64, + radius string) (*Circle, []byte, error) { + r, err := ParseDistance(radius) + if err != nil { + return nil, nil, err + } + rv := &Circle{Typ: CircleType, Vertices: cp, + Radius: radius, + radiusInMeters: r} + + vbytes, err := rv.Marshal() + if err != nil { + return nil, nil, err + } + + return rv, vbytes, nil +} + +// ------------------------------------------------------------------------ + +func (p *Point) IndexTokens(s *s2.RegionTermIndexer) []string { + p.init() + terms := s.GetIndexTermsForPoint(*p.s2point, "") + return StripCoveringTerms(terms) +} + +func (p *Point) QueryTokens(s *s2.RegionTermIndexer) []string { + p.init() + terms := s.GetQueryTermsForPoint(*p.s2point, "") + return StripCoveringTerms(terms) +} + +// ------------------------------------------------------------------------ + +func (mp *MultiPoint) IndexTokens(s *s2.RegionTermIndexer) []string { + mp.init() + var rv []string + for _, s2point := range mp.s2points { + terms := s.GetIndexTermsForPoint(*s2point, "") + rv = append(rv, terms...) + } + return StripCoveringTerms(rv) +} + +func (mp *MultiPoint) QueryTokens(s *s2.RegionTermIndexer) []string { + mp.init() + var rv []string + for _, s2point := range mp.s2points { + terms := s.GetQueryTermsForPoint(*s2point, "") + rv = append(rv, terms...) + } + + return StripCoveringTerms(rv) +} + +// ------------------------------------------------------------------------ + +func (ls *LineString) IndexTokens(s *s2.RegionTermIndexer) []string { + ls.init() + terms := s.GetIndexTermsForRegion(ls.pl.CapBound(), "") + return StripCoveringTerms(terms) +} + +func (ls *LineString) QueryTokens(s *s2.RegionTermIndexer) []string { + ls.init() + terms := s.GetQueryTermsForRegion(ls.pl.CapBound(), "") + return StripCoveringTerms(terms) +} + +// ------------------------------------------------------------------------ + +func (mls *MultiLineString) IndexTokens(s *s2.RegionTermIndexer) []string { + mls.init() + var rv []string + for _, ls := range mls.pls { + terms := s.GetIndexTermsForRegion(ls.CapBound(), "") + rv = append(rv, terms...) + } + + return StripCoveringTerms(rv) +} + +func (mls *MultiLineString) QueryTokens(s *s2.RegionTermIndexer) []string { + mls.init() + + var rv []string + for _, ls := range mls.pls { + terms := s.GetQueryTermsForRegion(ls.CapBound(), "") + rv = append(rv, terms...) + } + + return StripCoveringTerms(rv) +} + +// ------------------------------------------------------------------------ + +func (mp *MultiPolygon) IndexTokens(s *s2.RegionTermIndexer) []string { + mp.init() + + var rv []string + for _, s2pgn := range mp.s2pgns { + terms := s.GetIndexTermsForRegion(s2pgn.CapBound(), "") + rv = append(rv, terms...) + } + + return StripCoveringTerms(rv) +} + +func (mp *MultiPolygon) QueryTokens(s *s2.RegionTermIndexer) []string { + mp.init() + + var rv []string + for _, s2pgn := range mp.s2pgns { + terms := s.GetQueryTermsForRegion(s2pgn.CapBound(), "") + rv = append(rv, terms...) 
+ } + + return StripCoveringTerms(rv) +} + +// ------------------------------------------------------------------------ + +func (pgn *Polygon) IndexTokens(s *s2.RegionTermIndexer) []string { + pgn.init() + terms := s.GetIndexTermsForRegion( + pgn.s2pgn.CapBound(), "") + return StripCoveringTerms(terms) +} + +func (pgn *Polygon) QueryTokens(s *s2.RegionTermIndexer) []string { + pgn.init() + terms := s.GetQueryTermsForRegion( + pgn.s2pgn.CapBound(), "") + return StripCoveringTerms(terms) +} + +// ------------------------------------------------------------------------ + +func (c *Circle) IndexTokens(s *s2.RegionTermIndexer) []string { + c.init() + return StripCoveringTerms(s.GetIndexTermsForRegion(c.s2cap.CapBound(), "")) +} + +func (c *Circle) QueryTokens(s *s2.RegionTermIndexer) []string { + c.init() + return StripCoveringTerms(s.GetQueryTermsForRegion(c.s2cap.CapBound(), "")) +} + +// ------------------------------------------------------------------------ + +func (e *Envelope) IndexTokens(s *s2.RegionTermIndexer) []string { + e.init() + return StripCoveringTerms(s.GetIndexTermsForRegion(e.r.CapBound(), "")) +} + +func (e *Envelope) QueryTokens(s *s2.RegionTermIndexer) []string { + e.init() + return StripCoveringTerms(s.GetQueryTermsForRegion(e.r.CapBound(), "")) +} diff --git a/vendor/github.com/blevesearch/geo/s2/bits_go18.go b/vendor/github.com/blevesearch/geo/s2/bits_go18.go new file mode 100644 index 0000000..4b8bfef --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/bits_go18.go @@ -0,0 +1,54 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.9 +// +build !go1.9 + +package s2 + +// This file is for the bit manipulation code pre-Go 1.9. + +// findMSBSetNonZero64 returns the index (between 0 and 63) of the most +// significant set bit. Passing zero to this function returns zero. +func findMSBSetNonZero64(x uint64) int { + val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000} + shift := []uint64{1, 2, 4, 8, 16, 32} + var msbPos uint64 + for i := 5; i >= 0; i-- { + if x&val[i] != 0 { + x >>= shift[i] + msbPos |= shift[i] + } + } + return int(msbPos) +} + +const deBruijn64 = 0x03f79d71b4ca8b09 +const digitMask = uint64(1<<64 - 1) + +var deBruijn64Lookup = []byte{ + 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, + 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, + 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, + 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, +} + +// findLSBSetNonZero64 returns the index (between 0 and 63) of the least +// significant set bit. Passing zero to this function returns zero. +// +// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go +// which references (Knuth, volume 4, section 7.3.1). 
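+//
+// For example, findLSBSetNonZero64(40) returns 3, since
+// 40 = 0b101000 and its lowest set bit is bit 3.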
+func findLSBSetNonZero64(x uint64) int {
+	return int(deBruijn64Lookup[((x&-x)*(deBruijn64&digitMask))>>58])
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/bits_go19.go b/vendor/github.com/blevesearch/geo/s2/bits_go19.go
new file mode 100644
index 0000000..0de1ac6
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/bits_go19.go
@@ -0,0 +1,40 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.9
+// +build go1.9
+
+package s2
+
+// This file is for the bit manipulation code post-Go 1.9.
+
+import "math/bits"
+
+// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
+// significant set bit. Passing zero to this function returns zero.
+func findMSBSetNonZero64(x uint64) int {
+	if x == 0 {
+		return 0
+	}
+	return 63 - bits.LeadingZeros64(x)
+}
+
+// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
+// significant set bit. Passing zero to this function returns zero.
+func findLSBSetNonZero64(x uint64) int {
+	if x == 0 {
+		return 0
+	}
+	return bits.TrailingZeros64(x)
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/cap.go b/vendor/github.com/blevesearch/geo/s2/cap.go
new file mode 100644
index 0000000..c4fb2e1
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/cap.go
@@ -0,0 +1,519 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/s1"
+)
+
+var (
+	// centerPoint is the default center for Caps
+	centerPoint = PointFromCoords(1.0, 0, 0)
+)
+
+// Cap represents a disc-shaped region defined by a center and radius.
+// Technically this shape is called a "spherical cap" (rather than disc)
+// because it is not planar; the cap represents a portion of the sphere that
+// has been cut off by a plane. The boundary of the cap is the circle defined
+// by the intersection of the sphere and the plane. For containment purposes,
+// the cap is a closed set, i.e. it contains its boundary.
+//
+// For the most part, you can use a spherical cap wherever you would use a
+// disc in planar geometry. The radius of the cap is measured along the
+// surface of the sphere (rather than the straight-line distance through the
+// interior). Thus a cap of radius π/2 is a hemisphere, and a cap of radius
+// π covers the entire sphere.
+//
+// The center is a point on the surface of the unit sphere.
(Hence the need for +// it to be of unit length.) +// +// A cap can also be defined by its center point and height. The height is the +// distance from the center point to the cutoff plane. There is also support for +// "empty" and "full" caps, which contain no points and all points respectively. +// +// Here are some useful relationships between the cap height (h), the cap +// radius (r), the maximum chord length from the cap's center (d), and the +// radius of cap's base (a). +// +// h = 1 - cos(r) +// = 2 * sin^2(r/2) +// d^2 = 2 * h +// = a^2 + h^2 +// +// The zero value of Cap is an invalid cap. Use EmptyCap to get a valid empty cap. +type Cap struct { + center Point + radius s1.ChordAngle +} + +// CapFromPoint constructs a cap containing a single point. +func CapFromPoint(p Point) Cap { + return CapFromCenterChordAngle(p, 0) +} + +// CapFromCenterAngle constructs a cap with the given center and angle. +func CapFromCenterAngle(center Point, angle s1.Angle) Cap { + return CapFromCenterChordAngle(center, s1.ChordAngleFromAngle(angle)) +} + +// CapFromCenterChordAngle constructs a cap where the angle is expressed as an +// s1.ChordAngle. This constructor is more efficient than using an s1.Angle. +func CapFromCenterChordAngle(center Point, radius s1.ChordAngle) Cap { + return Cap{ + center: center, + radius: radius, + } +} + +// CapFromCenterHeight constructs a cap with the given center and height. A +// negative height yields an empty cap; a height of 2 or more yields a full cap. +// The center should be unit length. +func CapFromCenterHeight(center Point, height float64) Cap { + return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(2*height)) +} + +// CapFromCenterArea constructs a cap with the given center and surface area. +// Note that the area can also be interpreted as the solid angle subtended by the +// cap (because the sphere has unit radius). A negative area yields an empty cap; +// an area of 4*π or more yields a full cap. +func CapFromCenterArea(center Point, area float64) Cap { + return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(area/math.Pi)) +} + +// EmptyCap returns a cap that contains no points. +func EmptyCap() Cap { + return CapFromCenterChordAngle(centerPoint, s1.NegativeChordAngle) +} + +// FullCap returns a cap that contains all points. +func FullCap() Cap { + return CapFromCenterChordAngle(centerPoint, s1.StraightChordAngle) +} + +// IsValid reports whether the Cap is considered valid. +func (c Cap) IsValid() bool { + return c.center.Vector.IsUnit() && c.radius <= s1.StraightChordAngle +} + +// IsEmpty reports whether the cap is empty, i.e. it contains no points. +func (c Cap) IsEmpty() bool { + return c.radius < 0 +} + +// IsFull reports whether the cap is full, i.e. it contains all points. +func (c Cap) IsFull() bool { + return c.radius == s1.StraightChordAngle +} + +// Center returns the cap's center point. +func (c Cap) Center() Point { + return c.center +} + +// Height returns the height of the cap. This is the distance from the center +// point to the cutoff plane. +func (c Cap) Height() float64 { + return float64(0.5 * c.radius) +} + +// Radius returns the cap radius as an s1.Angle. (Note that the cap angle +// is stored internally as a ChordAngle, so this method requires a trigonometric +// operation and may yield a slightly different result than the value passed +// to CapFromCenterAngle). +func (c Cap) Radius() s1.Angle { + return c.radius.Angle() +} + +// Area returns the surface area of the Cap on the unit sphere. 
+func (c Cap) Area() float64 { + return 2.0 * math.Pi * math.Max(0, c.Height()) +} + +// Contains reports whether this cap contains the other. +func (c Cap) Contains(other Cap) bool { + // In a set containment sense, every cap contains the empty cap. + if c.IsFull() || other.IsEmpty() { + return true + } + return c.radius >= ChordAngleBetweenPoints(c.center, other.center).Add(other.radius) +} + +// Intersects reports whether this cap intersects the other cap. +// i.e. whether they have any points in common. +func (c Cap) Intersects(other Cap) bool { + if c.IsEmpty() || other.IsEmpty() { + return false + } + + return c.radius.Add(other.radius) >= ChordAngleBetweenPoints(c.center, other.center) +} + +// InteriorIntersects reports whether this caps interior intersects the other cap. +func (c Cap) InteriorIntersects(other Cap) bool { + // Make sure this cap has an interior and the other cap is non-empty. + if c.radius <= 0 || other.IsEmpty() { + return false + } + + return c.radius.Add(other.radius) > ChordAngleBetweenPoints(c.center, other.center) +} + +// ContainsPoint reports whether this cap contains the point. +func (c Cap) ContainsPoint(p Point) bool { + return ChordAngleBetweenPoints(c.center, p) <= c.radius +} + +// InteriorContainsPoint reports whether the point is within the interior of this cap. +func (c Cap) InteriorContainsPoint(p Point) bool { + return c.IsFull() || ChordAngleBetweenPoints(c.center, p) < c.radius +} + +// Complement returns the complement of the interior of the cap. A cap and its +// complement have the same boundary but do not share any interior points. +// The complement operator is not a bijection because the complement of a +// singleton cap (containing a single point) is the same as the complement +// of an empty cap. +func (c Cap) Complement() Cap { + if c.IsFull() { + return EmptyCap() + } + if c.IsEmpty() { + return FullCap() + } + + return CapFromCenterChordAngle(Point{c.center.Mul(-1)}, s1.StraightChordAngle.Sub(c.radius)) +} + +// CapBound returns a bounding spherical cap. This is not guaranteed to be exact. +func (c Cap) CapBound() Cap { + return c +} + +// RectBound returns a bounding latitude-longitude rectangle. +// The bounds are not guaranteed to be tight. +func (c Cap) RectBound() Rect { + if c.IsEmpty() { + return EmptyRect() + } + + capAngle := c.Radius().Radians() + allLongitudes := false + lat := r1.Interval{ + Lo: latitude(c.center).Radians() - capAngle, + Hi: latitude(c.center).Radians() + capAngle, + } + lng := s1.FullInterval() + + // Check whether cap includes the south pole. + if lat.Lo <= -math.Pi/2 { + lat.Lo = -math.Pi / 2 + allLongitudes = true + } + + // Check whether cap includes the north pole. + if lat.Hi >= math.Pi/2 { + lat.Hi = math.Pi / 2 + allLongitudes = true + } + + if !allLongitudes { + // Compute the range of longitudes covered by the cap. We use the law + // of sines for spherical triangles. Consider the triangle ABC where + // A is the north pole, B is the center of the cap, and C is the point + // of tangency between the cap boundary and a line of longitude. Then + // C is a right angle, and letting a,b,c denote the sides opposite A,B,C, + // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c). + // Here "a" is the cap angle, and "c" is the colatitude (90 degrees + // minus the latitude). This formula also works for negative latitudes. + // + // The formula for sin(a) follows from the relationship h = 1 - cos(a). 
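+		//
+		// Concretely, cos(a) = 1 - h gives sin²(a) = 1 - (1-h)² = h(2-h).
+		// Since a ChordAngle stores the squared chord length 2h, the Sin
+		// call below can be evaluated from that stored value alone, without
+		// any inverse trigonometry.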
+ sinA := c.radius.Sin() + sinC := math.Cos(latitude(c.center).Radians()) + if sinA <= sinC { + angleA := math.Asin(sinA / sinC) + lng.Lo = math.Remainder(longitude(c.center).Radians()-angleA, math.Pi*2) + lng.Hi = math.Remainder(longitude(c.center).Radians()+angleA, math.Pi*2) + } + } + return Rect{lat, lng} +} + +// Equal reports whether this cap is equal to the other cap. +func (c Cap) Equal(other Cap) bool { + return (c.radius == other.radius && c.center == other.center) || + (c.IsEmpty() && other.IsEmpty()) || + (c.IsFull() && other.IsFull()) +} + +// ApproxEqual reports whether this cap is equal to the other cap within the given tolerance. +func (c Cap) ApproxEqual(other Cap) bool { + const epsilon = 1e-14 + r2 := float64(c.radius) + otherR2 := float64(other.radius) + return c.center.ApproxEqual(other.center) && + math.Abs(r2-otherR2) <= epsilon || + c.IsEmpty() && otherR2 <= epsilon || + other.IsEmpty() && r2 <= epsilon || + c.IsFull() && otherR2 >= 2-epsilon || + other.IsFull() && r2 >= 2-epsilon +} + +// AddPoint increases the cap if necessary to include the given point. If this cap is empty, +// then the center is set to the point with a zero height. p must be unit-length. +func (c Cap) AddPoint(p Point) Cap { + if c.IsEmpty() { + c.center = p + c.radius = 0 + return c + } + + // After calling cap.AddPoint(p), cap.Contains(p) must be true. However + // we don't need to do anything special to achieve this because Contains() + // does exactly the same distance calculation that we do here. + if newRad := ChordAngleBetweenPoints(c.center, p); newRad > c.radius { + c.radius = newRad + } + return c +} + +// AddCap increases the cap height if necessary to include the other cap. If this cap is empty, +// it is set to the other cap. +func (c Cap) AddCap(other Cap) Cap { + if c.IsEmpty() { + return other + } + if other.IsEmpty() { + return c + } + + // We round up the distance to ensure that the cap is actually contained. + // TODO(roberts): Do some error analysis in order to guarantee this. + dist := ChordAngleBetweenPoints(c.center, other.center).Add(other.radius) + if newRad := dist.Expanded(dblEpsilon * float64(dist)); newRad > c.radius { + c.radius = newRad + } + return c +} + +// Expanded returns a new cap expanded by the given angle. If the cap is empty, +// it returns an empty cap. +func (c Cap) Expanded(distance s1.Angle) Cap { + if c.IsEmpty() { + return EmptyCap() + } + return CapFromCenterChordAngle(c.center, c.radius.Add(s1.ChordAngleFromAngle(distance))) +} + +func (c Cap) String() string { + return fmt.Sprintf("[Center=%v, Radius=%f]", c.center.Vector, c.Radius().Degrees()) +} + +// radiusToHeight converts an s1.Angle into the height of the cap. +func radiusToHeight(r s1.Angle) float64 { + if r.Radians() < 0 { + return float64(s1.NegativeChordAngle) + } + if r.Radians() >= math.Pi { + return float64(s1.RightChordAngle) + } + return float64(0.5 * s1.ChordAngleFromAngle(r)) + +} + +// ContainsCell reports whether the cap contains the given cell. +func (c Cap) ContainsCell(cell Cell) bool { + // If the cap does not contain all cell vertices, return false. + var vertices [4]Point + for k := 0; k < 4; k++ { + vertices[k] = cell.Vertex(k) + if !c.ContainsPoint(vertices[k]) { + return false + } + } + // Otherwise, return true if the complement of the cap does not intersect the cell. + return !c.Complement().intersects(cell, vertices) +} + +// IntersectsCell reports whether the cap intersects the cell. 
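+//
+// As an illustrative sketch only (the names and coordinates here are invented
+// for this comment, not part of the package API), a one-degree cap around one
+// point can be tested against the leaf cell of a nearby point:
+//
+//	cap := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(48.86, 2.35)), s1.Degree)
+//	cell := CellFromLatLng(LatLngFromDegrees(48.85, 2.29))
+//	intersects := cap.IntersectsCell(cell)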
+func (c Cap) IntersectsCell(cell Cell) bool { + // If the cap contains any cell vertex, return true. + var vertices [4]Point + for k := 0; k < 4; k++ { + vertices[k] = cell.Vertex(k) + if c.ContainsPoint(vertices[k]) { + return true + } + } + return c.intersects(cell, vertices) +} + +// intersects reports whether the cap intersects any point of the cell excluding +// its vertices (which are assumed to already have been checked). +func (c Cap) intersects(cell Cell, vertices [4]Point) bool { + // If the cap is a hemisphere or larger, the cell and the complement of the cap + // are both convex. Therefore since no vertex of the cell is contained, no other + // interior point of the cell is contained either. + if c.radius >= s1.RightChordAngle { + return false + } + + // We need to check for empty caps due to the center check just below. + if c.IsEmpty() { + return false + } + + // Optimization: return true if the cell contains the cap center. This allows half + // of the edge checks below to be skipped. + if cell.ContainsPoint(c.center) { + return true + } + + // At this point we know that the cell does not contain the cap center, and the cap + // does not contain any cell vertex. The only way that they can intersect is if the + // cap intersects the interior of some edge. + sin2Angle := c.radius.Sin2() + for k := 0; k < 4; k++ { + edge := cell.Edge(k).Vector + dot := c.center.Vector.Dot(edge) + if dot > 0 { + // The center is in the interior half-space defined by the edge. We do not need + // to consider these edges, since if the cap intersects this edge then it also + // intersects the edge on the opposite side of the cell, because the center is + // not contained with the cell. + continue + } + + // The Norm2() factor is necessary because "edge" is not normalized. + if dot*dot > sin2Angle*edge.Norm2() { + return false + } + + // Otherwise, the great circle containing this edge intersects the interior of the cap. We just + // need to check whether the point of closest approach occurs between the two edge endpoints. + dir := edge.Cross(c.center.Vector) + if dir.Dot(vertices[k].Vector) < 0 && dir.Dot(vertices[(k+1)&3].Vector) > 0 { + return true + } + } + return false +} + +// CellUnionBound computes a covering of the Cap. In general the covering +// consists of at most 4 cells except for very large caps, which may need +// up to 6 cells. The output is not sorted. +func (c Cap) CellUnionBound() []CellID { + // TODO(roberts): The covering could be made quite a bit tighter by mapping + // the cap to a rectangle in (i,j)-space and finding a covering for that. + + // Find the maximum level such that the cap contains at most one cell vertex + // and such that CellID.AppendVertexNeighbors() can be called. + level := MinWidthMetric.MaxLevel(c.Radius().Radians()) - 1 + + // If level < 0, more than three face cells are required. + if level < 0 { + cellIDs := make([]CellID, 6) + for face := 0; face < 6; face++ { + cellIDs[face] = CellIDFromFace(face) + } + return cellIDs + } + // The covering consists of the 4 cells at the given level that share the + // cell vertex that is closest to the cap center. + return cellIDFromPoint(c.center).VertexNeighbors(level) +} + +// Centroid returns the true centroid of the cap multiplied by its surface area +// The result lies on the ray from the origin through the cap's center, but it +// is not unit length. Note that if you just want the "surface centroid", i.e. +// the normalized result, then it is simpler to call Center. 
+// +// The reason for multiplying the result by the cap area is to make it +// easier to compute the centroid of more complicated shapes. The centroid +// of a union of disjoint regions can be computed simply by adding their +// Centroid() results. Caveat: for caps that contain a single point +// (i.e., zero radius), this method always returns the origin (0, 0, 0). +// This is because shapes with no area don't affect the centroid of a +// union whose total area is positive. +func (c Cap) Centroid() Point { + // From symmetry, the centroid of the cap must be somewhere on the line + // from the origin to the center of the cap on the surface of the sphere. + // When a sphere is divided into slices of constant thickness by a set of + // parallel planes, all slices have the same surface area. This implies + // that the radial component of the centroid is simply the midpoint of the + // range of radial distances spanned by the cap. That is easily computed + // from the cap height. + if c.IsEmpty() { + return Point{} + } + r := 1 - 0.5*c.Height() + return Point{c.center.Mul(r * c.Area())} +} + +// Union returns the smallest cap which encloses this cap and other. +func (c Cap) Union(other Cap) Cap { + // If the other cap is larger, swap c and other for the rest of the computations. + if c.radius < other.radius { + c, other = other, c + } + + if c.IsFull() || other.IsEmpty() { + return c + } + + // TODO: This calculation would be more efficient using s1.ChordAngles. + cRadius := c.Radius() + otherRadius := other.Radius() + distance := c.center.Distance(other.center) + if cRadius >= distance+otherRadius { + return c + } + + resRadius := 0.5 * (distance + cRadius + otherRadius) + resCenter := InterpolateAtDistance(0.5*(distance-cRadius+otherRadius), c.center, other.center) + return CapFromCenterAngle(resCenter, resRadius) +} + +// Encode encodes the Cap. +func (c Cap) Encode(w io.Writer) error { + e := &encoder{w: w} + c.encode(e) + return e.err +} + +func (c Cap) encode(e *encoder) { + e.writeFloat64(c.center.X) + e.writeFloat64(c.center.Y) + e.writeFloat64(c.center.Z) + e.writeFloat64(float64(c.radius)) +} + +// Decode decodes the Cap. +func (c *Cap) Decode(r io.Reader) error { + d := &decoder{r: asByteReader(r)} + c.decode(d) + return d.err +} + +func (c *Cap) decode(d *decoder) { + c.center.X = d.readFloat64() + c.center.Y = d.readFloat64() + c.center.Z = d.readFloat64() + c.radius = s1.ChordAngle(d.readFloat64()) +} diff --git a/vendor/github.com/blevesearch/geo/s2/cell.go b/vendor/github.com/blevesearch/geo/s2/cell.go new file mode 100644 index 0000000..0a01a4f --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/cell.go @@ -0,0 +1,698 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "io" + "math" + + "github.com/golang/geo/r1" + "github.com/golang/geo/r2" + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +// Cell is an S2 region object that represents a cell. 
Unlike CellIDs, +// it supports efficient containment and intersection tests. However, it is +// also a more expensive representation. +type Cell struct { + face int8 + level int8 + orientation int8 + id CellID + uv r2.Rect +} + +// CellFromCellID constructs a Cell corresponding to the given CellID. +func CellFromCellID(id CellID) Cell { + c := Cell{} + c.id = id + f, i, j, o := c.id.faceIJOrientation() + c.face = int8(f) + c.level = int8(c.id.Level()) + c.orientation = int8(o) + c.uv = ijLevelToBoundUV(i, j, int(c.level)) + return c +} + +// CellFromPoint constructs a cell for the given Point. +func CellFromPoint(p Point) Cell { + return CellFromCellID(cellIDFromPoint(p)) +} + +// CellFromLatLng constructs a cell for the given LatLng. +func CellFromLatLng(ll LatLng) Cell { + return CellFromCellID(CellIDFromLatLng(ll)) +} + +// Face returns the face this cell is on. +func (c Cell) Face() int { + return int(c.face) +} + +// oppositeFace returns the face opposite the given face. +func oppositeFace(face int) int { + return (face + 3) % 6 +} + +// Level returns the level of this cell. +func (c Cell) Level() int { + return int(c.level) +} + +// ID returns the CellID this cell represents. +func (c Cell) ID() CellID { + return c.id +} + +// IsLeaf returns whether this Cell is a leaf or not. +func (c Cell) IsLeaf() bool { + return c.level == maxLevel +} + +// SizeIJ returns the edge length of this cell in (i,j)-space. +func (c Cell) SizeIJ() int { + return sizeIJ(int(c.level)) +} + +// SizeST returns the edge length of this cell in (s,t)-space. +func (c Cell) SizeST() float64 { + return c.id.sizeST(int(c.level)) +} + +// Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order +// (lower left, lower right, upper right, upper left in the UV plane). +func (c Cell) Vertex(k int) Point { + return Point{faceUVToXYZ(int(c.face), c.uv.Vertices()[k].X, c.uv.Vertices()[k].Y).Normalize()} +} + +// Edge returns the inward-facing normal of the great circle passing through +// the CCW ordered edge from vertex k to vertex k+1 (mod 4) (for k = 0,1,2,3). +func (c Cell) Edge(k int) Point { + switch k { + case 0: + return Point{vNorm(int(c.face), c.uv.Y.Lo).Normalize()} // Bottom + case 1: + return Point{uNorm(int(c.face), c.uv.X.Hi).Normalize()} // Right + case 2: + return Point{vNorm(int(c.face), c.uv.Y.Hi).Mul(-1.0).Normalize()} // Top + default: + return Point{uNorm(int(c.face), c.uv.X.Lo).Mul(-1.0).Normalize()} // Left + } +} + +// BoundUV returns the bounds of this cell in (u,v)-space. +func (c Cell) BoundUV() r2.Rect { + return c.uv +} + +// Center returns the direction vector corresponding to the center in +// (s,t)-space of the given cell. This is the point at which the cell is +// divided into four subcells; it is not necessarily the centroid of the +// cell in (u,v)-space or (x,y,z)-space +func (c Cell) Center() Point { + return Point{c.id.rawPoint().Normalize()} +} + +// Children returns the four direct children of this cell in traversal order +// and returns true. If this is a leaf cell, or the children could not be created, +// false is returned. +// The C++ method is called Subdivide. +func (c Cell) Children() ([4]Cell, bool) { + var children [4]Cell + + if c.id.IsLeaf() { + return children, false + } + + // Compute the cell midpoint in uv-space. + uvMid := c.id.centerUV() + + // Create four children with the appropriate bounds. 
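+	// The children are produced in Hilbert curve traversal order: cid starts
+	// at the first child (ChildBegin) and advances with Next, while posToIJ
+	// maps each traversal position to the child's (i,j) quadrant given this
+	// cell's orientation.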
+ cid := c.id.ChildBegin() + for pos := 0; pos < 4; pos++ { + children[pos] = Cell{ + face: c.face, + level: c.level + 1, + orientation: c.orientation ^ int8(posToOrientation[pos]), + id: cid, + } + + // We want to split the cell in half in u and v. To decide which + // side to set equal to the midpoint value, we look at cell's (i,j) + // position within its parent. The index for i is in bit 1 of ij. + ij := posToIJ[c.orientation][pos] + i := ij >> 1 + j := ij & 1 + if i == 1 { + children[pos].uv.X.Hi = c.uv.X.Hi + children[pos].uv.X.Lo = uvMid.X + } else { + children[pos].uv.X.Lo = c.uv.X.Lo + children[pos].uv.X.Hi = uvMid.X + } + if j == 1 { + children[pos].uv.Y.Hi = c.uv.Y.Hi + children[pos].uv.Y.Lo = uvMid.Y + } else { + children[pos].uv.Y.Lo = c.uv.Y.Lo + children[pos].uv.Y.Hi = uvMid.Y + } + cid = cid.Next() + } + return children, true +} + +// ExactArea returns the area of this cell as accurately as possible. +func (c Cell) ExactArea() float64 { + v0, v1, v2, v3 := c.Vertex(0), c.Vertex(1), c.Vertex(2), c.Vertex(3) + return PointArea(v0, v1, v2) + PointArea(v0, v2, v3) +} + +// ApproxArea returns the approximate area of this cell. This method is accurate +// to within 3% percent for all cell sizes and accurate to within 0.1% for cells +// at level 5 or higher (i.e. squares 350km to a side or smaller on the Earth's +// surface). It is moderately cheap to compute. +func (c Cell) ApproxArea() float64 { + // All cells at the first two levels have the same area. + if c.level < 2 { + return c.AverageArea() + } + + // First, compute the approximate area of the cell when projected + // perpendicular to its normal. The cross product of its diagonals gives + // the normal, and the length of the normal is twice the projected area. + flatArea := 0.5 * (c.Vertex(2).Sub(c.Vertex(0).Vector). + Cross(c.Vertex(3).Sub(c.Vertex(1).Vector)).Norm()) + + // Now, compensate for the curvature of the cell surface by pretending + // that the cell is shaped like a spherical cap. The ratio of the + // area of a spherical cap to the area of its projected disc turns out + // to be 2 / (1 + sqrt(1 - r*r)) where r is the radius of the disc. + // For example, when r=0 the ratio is 1, and when r=1 the ratio is 2. + // Here we set Pi*r*r == flatArea to find the equivalent disc. + return flatArea * 2 / (1 + math.Sqrt(1-math.Min(1/math.Pi*flatArea, 1))) +} + +// AverageArea returns the average area of cells at the level of this cell. +// This is accurate to within a factor of 1.7. +func (c Cell) AverageArea() float64 { + return AvgAreaMetric.Value(int(c.level)) +} + +// IntersectsCell reports whether the intersection of this cell and the other cell is not nil. +func (c Cell) IntersectsCell(oc Cell) bool { + return c.id.Intersects(oc.id) +} + +// ContainsCell reports whether this cell contains the other cell. +func (c Cell) ContainsCell(oc Cell) bool { + return c.id.Contains(oc.id) +} + +// CellUnionBound computes a covering of the Cell. +func (c Cell) CellUnionBound() []CellID { + return c.CapBound().CellUnionBound() +} + +// latitude returns the latitude of the cell vertex in radians given by (i,j), +// where i and j indicate the Hi (1) or Lo (0) corner. 
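+//
+// For example, latitude(1, 0) is the latitude of the cell corner at
+// u = uv.X.Hi, v = uv.Y.Lo on this cell's face.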
+func (c Cell) latitude(i, j int) float64 { + var u, v float64 + switch { + case i == 0 && j == 0: + u = c.uv.X.Lo + v = c.uv.Y.Lo + case i == 0 && j == 1: + u = c.uv.X.Lo + v = c.uv.Y.Hi + case i == 1 && j == 0: + u = c.uv.X.Hi + v = c.uv.Y.Lo + case i == 1 && j == 1: + u = c.uv.X.Hi + v = c.uv.Y.Hi + default: + panic("i and/or j is out of bounds") + } + return latitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians() +} + +// longitude returns the longitude of the cell vertex in radians given by (i,j), +// where i and j indicate the Hi (1) or Lo (0) corner. +func (c Cell) longitude(i, j int) float64 { + var u, v float64 + switch { + case i == 0 && j == 0: + u = c.uv.X.Lo + v = c.uv.Y.Lo + case i == 0 && j == 1: + u = c.uv.X.Lo + v = c.uv.Y.Hi + case i == 1 && j == 0: + u = c.uv.X.Hi + v = c.uv.Y.Lo + case i == 1 && j == 1: + u = c.uv.X.Hi + v = c.uv.Y.Hi + default: + panic("i and/or j is out of bounds") + } + return longitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians() +} + +var ( + poleMinLat = math.Asin(math.Sqrt(1.0/3)) - 0.5*dblEpsilon +) + +// RectBound returns the bounding rectangle of this cell. +func (c Cell) RectBound() Rect { + if c.level > 0 { + // Except for cells at level 0, the latitude and longitude extremes are + // attained at the vertices. Furthermore, the latitude range is + // determined by one pair of diagonally opposite vertices and the + // longitude range is determined by the other pair. + // + // We first determine which corner (i,j) of the cell has the largest + // absolute latitude. To maximize latitude, we want to find the point in + // the cell that has the largest absolute z-coordinate and the smallest + // absolute x- and y-coordinates. To do this we look at each coordinate + // (u and v), and determine whether we want to minimize or maximize that + // coordinate based on the axis direction and the cell's (u,v) quadrant. + u := c.uv.X.Lo + c.uv.X.Hi + v := c.uv.Y.Lo + c.uv.Y.Hi + var i, j int + if uAxis(int(c.face)).Z == 0 { + if u < 0 { + i = 1 + } + } else if u > 0 { + i = 1 + } + if vAxis(int(c.face)).Z == 0 { + if v < 0 { + j = 1 + } + } else if v > 0 { + j = 1 + } + lat := r1.IntervalFromPoint(c.latitude(i, j)).AddPoint(c.latitude(1-i, 1-j)) + lng := s1.EmptyInterval().AddPoint(c.longitude(i, 1-j)).AddPoint(c.longitude(1-i, j)) + + // We grow the bounds slightly to make sure that the bounding rectangle + // contains LatLngFromPoint(P) for any point P inside the loop L defined by the + // four *normalized* vertices. Note that normalization of a vector can + // change its direction by up to 0.5 * dblEpsilon radians, and it is not + // enough just to add Normalize calls to the code above because the + // latitude/longitude ranges are not necessarily determined by diagonally + // opposite vertex pairs after normalization. + // + // We would like to bound the amount by which the latitude/longitude of a + // contained point P can exceed the bounds computed above. In the case of + // longitude, the normalization error can change the direction of rounding + // leading to a maximum difference in longitude of 2 * dblEpsilon. In + // the case of latitude, the normalization error can shift the latitude by + // up to 0.5 * dblEpsilon and the other sources of error can cause the + // two latitudes to differ by up to another 1.5 * dblEpsilon, which also + // leads to a maximum difference of 2 * dblEpsilon. 
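+		//
+		// PolarClosure then widens the longitude interval to a full interval
+		// whenever the expanded latitude interval reaches either pole, since
+		// every longitude passes through the poles.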
+		return Rect{lat, lng}.expanded(LatLng{s1.Angle(2 * dblEpsilon), s1.Angle(2 * dblEpsilon)}).PolarClosure()
+	}
+
+	// The 4 cells around the equator extend to +/-45 degrees latitude at the
+	// midpoints of their top and bottom edges. The two cells covering the
+	// poles extend down to +/-35.26 degrees at their vertices. The maximum
+	// error in this calculation is 0.5 * dblEpsilon.
+	var bound Rect
+	switch c.face {
+	case 0:
+		bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-math.Pi / 4, math.Pi / 4}}
+	case 1:
+		bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{math.Pi / 4, 3 * math.Pi / 4}}
+	case 2:
+		bound = Rect{r1.Interval{poleMinLat, math.Pi / 2}, s1.FullInterval()}
+	case 3:
+		bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{3 * math.Pi / 4, -3 * math.Pi / 4}}
+	case 4:
+		bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-3 * math.Pi / 4, -math.Pi / 4}}
+	default:
+		bound = Rect{r1.Interval{-math.Pi / 2, -poleMinLat}, s1.FullInterval()}
+	}
+
+	// Finally, we expand the bound to account for the error when a point P is
+	// converted to a LatLng to test for containment. (The bound should be
+	// large enough so that it contains the computed LatLng of any contained
+	// point, not just the infinite-precision version.) We don't need to expand
+	// longitude because longitude is calculated via a single call to math.Atan2,
+	// which is guaranteed to be semi-monotonic.
+	return bound.expanded(LatLng{s1.Angle(dblEpsilon), s1.Angle(0)})
+}
+
+// CapBound returns the bounding cap of this cell.
+func (c Cell) CapBound() Cap {
+	// We use the cell center in (u,v)-space as the cap axis. This vector is very close
+	// to Center() and faster to compute. Neither one of these vectors yields the
+	// bounding cap with minimal surface area, but they are both pretty close.
+	cap := CapFromPoint(Point{faceUVToXYZ(int(c.face), c.uv.Center().X, c.uv.Center().Y).Normalize()})
+	for k := 0; k < 4; k++ {
+		cap = cap.AddPoint(c.Vertex(k))
+	}
+	return cap
+}
+
+// ContainsPoint reports whether this cell contains the given point. Note that
+// unlike Loop/Polygon, a Cell is considered to be a closed set. This means
+// that a point on a Cell's edge or vertex belongs to the Cell and the relevant
+// adjacent Cells too.
+//
+// If you want every point to be contained by exactly one Cell,
+// you will need to convert the Cell to a Loop.
+func (c Cell) ContainsPoint(p Point) bool {
+	var uv r2.Point
+	var ok bool
+	if uv.X, uv.Y, ok = faceXYZToUV(int(c.face), p); !ok {
+		return false
+	}
+
+	// Expand the (u,v) bound to ensure that
+	//
+	//	CellFromPoint(p).ContainsPoint(p)
+	//
+	// is always true. To do this, we need to account for the error when
+	// converting from (u,v) coordinates to (s,t) coordinates. In the
+	// normal case the total error is at most dblEpsilon.
+	return c.uv.ExpandedByMargin(dblEpsilon).ContainsPoint(uv)
+}
+
+// Encode encodes the Cell.
+func (c Cell) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	c.encode(e)
+	return e.err
+}
+
+func (c Cell) encode(e *encoder) {
+	c.id.encode(e)
+}
+
+// Decode decodes the Cell.
+func (c *Cell) Decode(r io.Reader) error {
+	d := &decoder{r: asByteReader(r)}
+	c.decode(d)
+	return d.err
+}
+
+func (c *Cell) decode(d *decoder) {
+	c.id.decode(d)
+	*c = CellFromCellID(c.id)
+}
+
+// vertexChordDist2 returns the squared chord distance from point P to the
+// given corner vertex specified by the Hi or Lo values of each.
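+//
+// Both p and the vertex are expected in the (u,v,w) coordinate frame of this
+// cell's face; the vertex itself is the direction (u, v, 1), normalized here
+// by PointFromCoords.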
+func (c Cell) vertexChordDist2(p Point, xHi, yHi bool) s1.ChordAngle {
+	x := c.uv.X.Lo
+	y := c.uv.Y.Lo
+	if xHi {
+		x = c.uv.X.Hi
+	}
+	if yHi {
+		y = c.uv.Y.Hi
+	}
+
+	return ChordAngleBetweenPoints(p, PointFromCoords(x, y, 1))
+}
+
+// uEdgeIsClosest reports whether a point P is closer to the interior of the specified
+// Cell edge (either the lower or upper edge of the Cell) or to the endpoints.
+func (c Cell) uEdgeIsClosest(p Point, vHi bool) bool {
+	u0 := c.uv.X.Lo
+	u1 := c.uv.X.Hi
+	v := c.uv.Y.Lo
+	if vHi {
+		v = c.uv.Y.Hi
+	}
+	// These are the normals to the planes that are perpendicular to the edge
+	// and pass through one of its two endpoints.
+	dir0 := r3.Vector{v*v + 1, -u0 * v, -u0}
+	dir1 := r3.Vector{v*v + 1, -u1 * v, -u1}
+	return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
+}
+
+// vEdgeIsClosest reports whether a point P is closer to the interior of the specified
+// Cell edge (either the right or left edge of the Cell) or to the endpoints.
+func (c Cell) vEdgeIsClosest(p Point, uHi bool) bool {
+	v0 := c.uv.Y.Lo
+	v1 := c.uv.Y.Hi
+	u := c.uv.X.Lo
+	if uHi {
+		u = c.uv.X.Hi
+	}
+	dir0 := r3.Vector{-u * v0, u*u + 1, -v0}
+	dir1 := r3.Vector{-u * v1, u*u + 1, -v1}
+	return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
+}
+
+// edgeDistance reports the distance from a Point P to a given Cell edge. The
+// point P is specified by ij, its dot product with the edge's normal, and the
+// edge by uv, the coordinate value that determines its normal.
+func edgeDistance(ij, uv float64) s1.ChordAngle {
+	// Let P be the target point and let R be the closest point on the given
+	// edge AB. The desired distance PR can be expressed as PR^2 = PQ^2 + QR^2
+	// where Q is the point P projected onto the plane through the great circle
+	// through AB. We can compute the distance PQ^2 perpendicular to the plane
+	// from "dirIJ" (the dot product of the target point P with the edge
+	// normal) and the squared length of the edge normal (1 + uv**2).
+	pq2 := (ij * ij) / (1 + uv*uv)
+
+	// We can compute the distance QR as (1 - OQ) where O is the sphere origin,
+	// and we can compute OQ^2 = 1 - PQ^2 using the Pythagorean theorem.
+	// (This calculation loses accuracy as angle POQ approaches Pi/2.)
+	qr := 1 - math.Sqrt(1-pq2)
+	return s1.ChordAngleFromSquaredLength(pq2 + qr*qr)
+}
+
+// distanceInternal reports the distance from the given point to the interior of
+// the cell if toInterior is true or to the boundary of the cell otherwise.
+func (c Cell) distanceInternal(targetXYZ Point, toInterior bool) s1.ChordAngle {
+	// All calculations are done in the (u,v,w) coordinates of this cell's face.
+	target := faceXYZtoUVW(int(c.face), targetXYZ)
+
+	// Compute dot products with all four upward or rightward-facing edge
+	// normals. dirIJ is the dot product for the edge corresponding to axis
+	// I, endpoint J. For example, dir01 is the right edge of the Cell
+	// (corresponding to the upper endpoint of the u-axis).
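+	// On this face the boundary plane u = u0 has (u,v,w)-normal proportional
+	// to (1, 0, -u0), so its dot product with target reduces to
+	// target.X - target.Z*u0; the v edges work the same way using target.Y.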
+ dir00 := target.X - target.Z*c.uv.X.Lo + dir01 := target.X - target.Z*c.uv.X.Hi + dir10 := target.Y - target.Z*c.uv.Y.Lo + dir11 := target.Y - target.Z*c.uv.Y.Hi + inside := true + if dir00 < 0 { + inside = false // Target is to the left of the cell + if c.vEdgeIsClosest(target, false) { + return edgeDistance(-dir00, c.uv.X.Lo) + } + } + if dir01 > 0 { + inside = false // Target is to the right of the cell + if c.vEdgeIsClosest(target, true) { + return edgeDistance(dir01, c.uv.X.Hi) + } + } + if dir10 < 0 { + inside = false // Target is below the cell + if c.uEdgeIsClosest(target, false) { + return edgeDistance(-dir10, c.uv.Y.Lo) + } + } + if dir11 > 0 { + inside = false // Target is above the cell + if c.uEdgeIsClosest(target, true) { + return edgeDistance(dir11, c.uv.Y.Hi) + } + } + if inside { + if toInterior { + return s1.ChordAngle(0) + } + // Although you might think of Cells as rectangles, they are actually + // arbitrary quadrilaterals after they are projected onto the sphere. + // Therefore the simplest approach is just to find the minimum distance to + // any of the four edges. + return minChordAngle(edgeDistance(-dir00, c.uv.X.Lo), + edgeDistance(dir01, c.uv.X.Hi), + edgeDistance(-dir10, c.uv.Y.Lo), + edgeDistance(dir11, c.uv.Y.Hi)) + } + + // Otherwise, the closest point is one of the four cell vertices. Note that + // it is *not* trivial to narrow down the candidates based on the edge sign + // tests above, because (1) the edges don't meet at right angles and (2) + // there are points on the far side of the sphere that are both above *and* + // below the cell, etc. + return minChordAngle(c.vertexChordDist2(target, false, false), + c.vertexChordDist2(target, true, false), + c.vertexChordDist2(target, false, true), + c.vertexChordDist2(target, true, true)) +} + +// Distance reports the distance from the cell to the given point. Returns zero if +// the point is inside the cell. +func (c Cell) Distance(target Point) s1.ChordAngle { + return c.distanceInternal(target, true) +} + +// MaxDistance reports the maximum distance from the cell (including its interior) to the +// given point. +func (c Cell) MaxDistance(target Point) s1.ChordAngle { + // First check the 4 cell vertices. If all are within the hemisphere + // centered around target, the max distance will be to one of these vertices. + targetUVW := faceXYZtoUVW(int(c.face), target) + maxDist := maxChordAngle(c.vertexChordDist2(targetUVW, false, false), + c.vertexChordDist2(targetUVW, true, false), + c.vertexChordDist2(targetUVW, false, true), + c.vertexChordDist2(targetUVW, true, true)) + + if maxDist <= s1.RightChordAngle { + return maxDist + } + + // Otherwise, find the minimum distance dMin to the antipodal point and the + // maximum distance will be pi - dMin. + return s1.StraightChordAngle - c.BoundaryDistance(Point{target.Mul(-1)}) +} + +// BoundaryDistance reports the distance from the cell boundary to the given point. +func (c Cell) BoundaryDistance(target Point) s1.ChordAngle { + return c.distanceInternal(target, false) +} + +// DistanceToEdge returns the minimum distance from the cell to the given edge AB. Returns +// zero if the edge intersects the cell interior. +func (c Cell) DistanceToEdge(a, b Point) s1.ChordAngle { + // Possible optimizations: + // - Currently the (cell vertex, edge endpoint) distances are computed + // twice each, and the length of AB is computed 4 times. + // - To fix this, refactor GetDistance(target) so that it skips calculating + // the distance to each cell vertex. 
Instead, compute the cell vertices + // and distances in this function, and add a low-level UpdateMinDistance + // that allows the XA, XB, and AB distances to be passed in. + // - It might also be more efficient to do all calculations in UVW-space, + // since this would involve transforming 2 points rather than 4. + + // First, check the minimum distance to the edge endpoints A and B. + // (This also detects whether either endpoint is inside the cell.) + minDist := minChordAngle(c.Distance(a), c.Distance(b)) + if minDist == 0 { + return minDist + } + + // Otherwise, check whether the edge crosses the cell boundary. + crosser := NewChainEdgeCrosser(a, b, c.Vertex(3)) + for i := 0; i < 4; i++ { + if crosser.ChainCrossingSign(c.Vertex(i)) != DoNotCross { + return 0 + } + } + + // Finally, check whether the minimum distance occurs between a cell vertex + // and the interior of the edge AB. (Some of this work is redundant, since + // it also checks the distance to the endpoints A and B again.) + // + // Note that we don't need to check the distance from the interior of AB to + // the interior of a cell edge, because the only way that this distance can + // be minimal is if the two edges cross (already checked above). + for i := 0; i < 4; i++ { + minDist, _ = UpdateMinDistance(c.Vertex(i), a, b, minDist) + } + return minDist +} + +// MaxDistanceToEdge returns the maximum distance from the cell (including its interior) +// to the given edge AB. +func (c Cell) MaxDistanceToEdge(a, b Point) s1.ChordAngle { + // If the maximum distance from both endpoints to the cell is less than π/2 + // then the maximum distance from the edge to the cell is the maximum of the + // two endpoint distances. + maxDist := maxChordAngle(c.MaxDistance(a), c.MaxDistance(b)) + if maxDist <= s1.RightChordAngle { + return maxDist + } + + return s1.StraightChordAngle - c.DistanceToEdge(Point{a.Mul(-1)}, Point{b.Mul(-1)}) +} + +// DistanceToCell returns the minimum distance from this cell to the given cell. +// It returns zero if one cell contains the other. +func (c Cell) DistanceToCell(target Cell) s1.ChordAngle { + // If the cells intersect, the distance is zero. We use the (u,v) ranges + // rather than CellID intersects so that cells that share a partial edge or + // corner are considered to intersect. + if c.face == target.face && c.uv.Intersects(target.uv) { + return 0 + } + + // Otherwise, the minimum distance always occurs between a vertex of one + // cell and an edge of the other cell (including the edge endpoints). This + // represents a total of 32 possible (vertex, edge) pairs. + // + // TODO(roberts): This could be optimized to be at least 5x faster by pruning + // the set of possible closest vertex/edge pairs using the faces and (u,v) + // ranges of both cells. + var va, vb [4]Point + for i := 0; i < 4; i++ { + va[i] = c.Vertex(i) + vb[i] = target.Vertex(i) + } + minDist := s1.InfChordAngle() + for i := 0; i < 4; i++ { + for j := 0; j < 4; j++ { + minDist, _ = UpdateMinDistance(va[i], vb[j], vb[(j+1)&3], minDist) + minDist, _ = UpdateMinDistance(vb[i], va[j], va[(j+1)&3], minDist) + } + } + return minDist +} + +// MaxDistanceToCell returns the maximum distance from the cell (including its +// interior) to the given target cell. +func (c Cell) MaxDistanceToCell(target Cell) s1.ChordAngle { + // Need to check the antipodal target for intersection with the cell. If it + // intersects, the distance is the straight ChordAngle. + // antipodalUV is the transpose of the original UV, interpreted within the opposite face. 
+ antipodalUV := r2.Rect{target.uv.Y, target.uv.X} + if int(c.face) == oppositeFace(int(target.face)) && c.uv.Intersects(antipodalUV) { + return s1.StraightChordAngle + } + + // Otherwise, the maximum distance always occurs between a vertex of one + // cell and an edge of the other cell (including the edge endpoints). This + // represents a total of 32 possible (vertex, edge) pairs. + // + // TODO(roberts): When the maximum distance is at most π/2, the maximum is + // always attained between a pair of vertices, and this could be made much + // faster by testing each vertex pair once rather than the current 4 times. + var va, vb [4]Point + for i := 0; i < 4; i++ { + va[i] = c.Vertex(i) + vb[i] = target.Vertex(i) + } + maxDist := s1.NegativeChordAngle + for i := 0; i < 4; i++ { + for j := 0; j < 4; j++ { + maxDist, _ = UpdateMaxDistance(va[i], vb[j], vb[(j+1)&3], maxDist) + maxDist, _ = UpdateMaxDistance(vb[i], va[j], va[(j+1)&3], maxDist) + } + } + return maxDist +} diff --git a/vendor/github.com/blevesearch/geo/s2/cell_index.go b/vendor/github.com/blevesearch/geo/s2/cell_index.go new file mode 100644 index 0000000..879df48 --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/cell_index.go @@ -0,0 +1,584 @@ +// Copyright 2020 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "sort" +) + +const ( + // A special label indicating that the ContentsIterator done is true. + cellIndexDoneContents = -1 +) + +// cellIndexNode represents a node in the CellIndex. Cells are organized in a +// tree such that the ancestors of a given node contain that node. +type cellIndexNode struct { + cellID CellID + label int32 + parent int32 +} + +// newCellIndexNode returns a node with the appropriate default values. +func newCellIndexNode() cellIndexNode { + return cellIndexNode{ + cellID: 0, + label: cellIndexDoneContents, + parent: -1, + } +} + +// A rangeNode represents a range of leaf CellIDs. The range starts at +// startID (a leaf cell) and ends at the startID field of the next +// rangeNode. contents points to the node of the CellIndex cellTree +// representing the cells that overlap this range. +type rangeNode struct { + startID CellID // First leaf cell contained by this range. + contents int32 // Contents of this node (an index within the cell tree). +} + +// CellIndexIterator is an iterator that visits the entire set of indexed +// (CellID, label) pairs in an unspecified order. +type CellIndexIterator struct { + // TODO(roberts): Implement + cellTree []cellIndexNode + pos int +} + +// NewCellIndexIterator creates an iterator for the given CellIndex. +func NewCellIndexIterator(index *CellIndex) *CellIndexIterator { + return &CellIndexIterator{ + cellTree: index.cellTree, + } +} + +// CellID returns the current CellID. +func (c *CellIndexIterator) CellID() CellID { + return c.cellTree[c.pos].cellID +} + +// Label returns the current Label. 
+func (c *CellIndexIterator) Label() int32 { + return c.cellTree[c.pos].label +} + +func (c *CellIndexIterator) Done() bool { + return c.pos == len(c.cellTree)-1 +} + +func (c *CellIndexIterator) Next() { + c.pos++ +} + +// CellIndexRangeIterator is an iterator that seeks and iterates over a set of +// non-overlapping leaf cell ranges that cover the entire sphere. The indexed +// (CellID, label) pairs that intersect the current leaf cell range can be +// visited using CellIndexContentsIterator (see below). +type CellIndexRangeIterator struct { + rangeNodes []rangeNode + pos int + nonEmpty bool +} + +// NewCellIndexRangeIterator creates an iterator for the given CellIndex. +// The iterator is initially *unpositioned*; you must call a positioning method +// such as Begin() or Seek() before accessing its contents. +func NewCellIndexRangeIterator(index *CellIndex) *CellIndexRangeIterator { + return &CellIndexRangeIterator{ + rangeNodes: index.rangeNodes, + } +} + +// NewCellIndexNonEmptyRangeIterator creates an iterator for the given CellIndex. +// The iterator is initially *unpositioned*; you must call a positioning method such as +// Begin() or Seek() before accessing its contents. +func NewCellIndexNonEmptyRangeIterator(index *CellIndex) *CellIndexRangeIterator { + return &CellIndexRangeIterator{ + rangeNodes: index.rangeNodes, + nonEmpty: true, + } +} + +// StartID reports the CellID of the start of the current range of leaf CellIDs. +// +// If done is true, this returns the last possible CellID. This property means +// that most loops do not need to test done explicitly. +func (c *CellIndexRangeIterator) StartID() CellID { + return c.rangeNodes[c.pos].startID +} + +// LimitID reports the non-inclusive end of the current range of leaf CellIDs. +// +// This assumes the iterator is not done. +func (c *CellIndexRangeIterator) LimitID() CellID { + return c.rangeNodes[c.pos+1].startID +} + +// IsEmpty reports if no (CellID, label) pairs intersect this range. +// Also returns true if done() is true. +func (c *CellIndexRangeIterator) IsEmpty() bool { + return c.rangeNodes[c.pos].contents == cellIndexDoneContents +} + +// Begin positions the iterator at the first range of leaf cells (if any). +func (c *CellIndexRangeIterator) Begin() { + c.pos = 0 + for c.nonEmpty && c.IsEmpty() && !c.Done() { + c.pos++ + } +} + +// Prev positions the iterator at the previous entry and reports whether it was not +// already positioned at the beginning. +func (c *CellIndexRangeIterator) Prev() bool { + if c.nonEmpty { + return c.nonEmptyPrev() + } + return c.prev() +} + +// prev is used to position the iterator at the previous entry without checking +// if nonEmpty is true to prevent unwanted recursion. +func (c *CellIndexRangeIterator) prev() bool { + if c.pos == 0 { + return false + } + + c.pos-- + return true +} + +// Prev positions the iterator at the previous entry, and reports whether it was +// already positioned at the beginning. +func (c *CellIndexRangeIterator) nonEmptyPrev() bool { + for c.prev() { + if !c.IsEmpty() { + return true + } + } + + // Return the iterator to its original position. + if c.IsEmpty() && !c.Done() { + c.Next() + } + return false +} + +// Next advances the iterator to the next range of leaf cells. +// +// This assumes the iterator is not done. +func (c *CellIndexRangeIterator) Next() { + c.pos++ + for c.nonEmpty && c.IsEmpty() && !c.Done() { + c.pos++ + } +} + +// Advance reports if advancing would leave it positioned on a valid range. 
If +// the value would not be valid, the positioning is not changed. +func (c *CellIndexRangeIterator) Advance(n int) bool { + // Note that the last element of rangeNodes is a sentinel value. + if n >= len(c.rangeNodes)-1-c.pos { + return false + } + c.pos += n + return true +} + +// Finish positions the iterator so that done is true. +func (c *CellIndexRangeIterator) Finish() { + // Note that the last element of rangeNodes is a sentinel value. + c.pos = len(c.rangeNodes) - 1 +} + +// Done reports if the iterator is positioned beyond the last valid range. +func (c *CellIndexRangeIterator) Done() bool { + return c.pos >= len(c.rangeNodes)-1 +} + +// Seek positions the iterator at the first range with startID >= target. +// Such an entry always exists as long as "target" is a valid leaf cell. +// +// Note that it is valid to access startID even when done is true. +func (c *CellIndexRangeIterator) Seek(target CellID) { + c.pos = sort.Search(len(c.rangeNodes), func(i int) bool { + return c.rangeNodes[i].startID > target + }) - 1 + + // Ensure we don't go beyond the beginning. + if c.pos < 0 { + c.pos = 0 + } + + // Nonempty needs to find the next non-empty entry. + for c.nonEmpty && c.IsEmpty() && !c.Done() { + // c.Next() + c.pos++ + } +} + +// CellIndexContentsIterator is an iterator that visits the (CellID, label) pairs +// that cover a set of leaf cell ranges (see CellIndexRangeIterator). Note that +// when multiple leaf cell ranges are visited, this iterator only guarantees that +// each result will be reported at least once, i.e. duplicate values may be +// suppressed. If you want duplicate values to be reported again, be sure to call +// Clear first. +// +// In particular, the implementation guarantees that when multiple leaf +// cell ranges are visited in monotonically increasing order, then each +// (CellID, label) pair is reported exactly once. +type CellIndexContentsIterator struct { + // The maximum index within the cellTree slice visited during the + // previous call to StartUnion. This is used to eliminate duplicate + // values when StartUnion is called multiple times. + nodeCutoff int32 + + // The maximum index within the cellTree visited during the + // current call to StartUnion. This is used to update nodeCutoff. + nextNodeCutoff int32 + + // The value of startID from the previous call to StartUnion. + // This is used to check whether these values are monotonically + // increasing. + prevStartID CellID + + // The cell tree from CellIndex + cellTree []cellIndexNode + + // A copy of the current node in the cell tree. + node cellIndexNode +} + +// NewCellIndexContentsIterator returns a new contents iterator. +// +// Note that the iterator needs to be positioned using StartUnion before +// it can be safely used. +func NewCellIndexContentsIterator(index *CellIndex) *CellIndexContentsIterator { + it := &CellIndexContentsIterator{ + cellTree: index.cellTree, + prevStartID: 0, + nodeCutoff: -1, + nextNodeCutoff: -1, + node: cellIndexNode{label: cellIndexDoneContents}, + } + return it +} + +// Clear clears all state with respect to which range(s) have been visited. +func (c *CellIndexContentsIterator) Clear() { + c.prevStartID = 0 + c.nodeCutoff = -1 + c.nextNodeCutoff = -1 + c.node.label = cellIndexDoneContents +} + +// CellID returns the current CellID. +func (c *CellIndexContentsIterator) CellID() CellID { + return c.node.cellID +} + +// Label returns the current Label. 
+func (c *CellIndexContentsIterator) Label() int32 {
+	return c.node.label
+}
+
+// Next advances the iterator to the next (CellID, label) pair covered by the
+// current leaf cell range.
+//
+// This requires the iterator to not be done.
+func (c *CellIndexContentsIterator) Next() {
+	if c.node.parent <= c.nodeCutoff {
+		// We have already processed this node and its ancestors.
+		c.nodeCutoff = c.nextNodeCutoff
+		c.node.label = cellIndexDoneContents
+	} else {
+		c.node = c.cellTree[c.node.parent]
+	}
+}
+
+// Done reports if all (CellID, label) pairs have been visited.
+func (c *CellIndexContentsIterator) Done() bool {
+	return c.node.label == cellIndexDoneContents
+}
+
+// StartUnion positions the ContentsIterator at the first (cell_id, label) pair
+// that covers the given leaf cell range. Note that when multiple leaf cell
+// ranges are visited using the same ContentsIterator, duplicate values
+// may be suppressed. If you don't want this behavior, call Clear first.
+func (c *CellIndexContentsIterator) StartUnion(r *CellIndexRangeIterator) {
+	if r.StartID() < c.prevStartID {
+		c.nodeCutoff = -1 // Can't automatically eliminate duplicates.
+	}
+	c.prevStartID = r.StartID()
+
+	contents := r.rangeNodes[r.pos].contents
+	if contents <= c.nodeCutoff {
+		c.node.label = cellIndexDoneContents
+	} else {
+		c.node = c.cellTree[contents]
+	}
+
+	// When visiting ancestors, we can stop as soon as the node index is smaller
+	// than any previously visited node index. Because indexes are assigned
+	// using a preorder traversal, such nodes are guaranteed to have already
+	// been reported.
+	c.nextNodeCutoff = contents
+}
+
+// CellIndex stores a collection of (CellID, label) pairs.
+//
+// The CellIDs may be overlapping or contain duplicate values. For example, a
+// CellIndex could store a collection of CellUnions, where each CellUnion
+// gets its own non-negative int32 label.
+//
+// Similar to ShapeIndex and PointIndex which map each stored element to an
+// identifier, CellIndex stores a label that is typically used to map the
+// results of queries back to the client's specific data.
+//
+// The zero value for a CellIndex is sufficient when constructing a CellIndex.
+//
+// To build a CellIndex where each Cell has a distinct label, call Add for each
+// (CellID, label) pair, and then Build the index. For example:
+//
+//	// contents is a mapping of an identifier in my system (restaurantID,
+//	// vehicleID, etc) to a CellID
+//	var contents = map[int32]CellID{...}
+//
+//	for key, val := range contents {
+//		index.Add(val, key)
+//	}
+//
+//	index.Build()
+//
+// There is also a helper method that adds all elements of CellUnion with the
+// same label:
+//
+//	index.AddCellUnion(cellUnion, label)
+//
+// Note that the index is not dynamic; the contents of the index cannot be
+// changed once it has been built. Adding more after calling Build results in
+// undefined behavior of the index.
+//
+// There are several options for retrieving data from the index. The simplest
+// is to use a built-in method such as GetIntersectingLabels (which returns
+// the labels of all cells that intersect a given target CellUnion):
+//
+//	labels := index.GetIntersectingLabels(targetUnion)
+//
+// Alternatively, you can use a ClosestCellQuery which computes the cell(s)
+// that are closest to a given target geometry.
+// +// For example, here is how to find all cells that are closer than +// distanceLimit to a given target point: +// +// query := NewClosestCellQuery(cellIndex, opts) +// target := NewMinDistanceToPointTarget(targetPoint); +// for result := range query.FindCells(target) { +// // result.Distance() is the distance to the target. +// // result.CellID() is the indexed CellID. +// // result.Label() is the label associated with the CellID. +// DoSomething(targetPoint, result); +// } +// +// Internally, the index consists of a set of non-overlapping leaf cell ranges +// that subdivide the sphere and such that each range intersects a particular +// set of (cellID, label) pairs. +// +// Most clients should use either the methods such as VisitIntersectingCells +// and IntersectingLabels, or a helper such as ClosestCellQuery. +type CellIndex struct { + // A tree of (cellID, label) pairs such that if X is an ancestor of Y, then + // X.cellID contains Y.cellID. The contents of a given range of leaf + // cells can be represented by pointing to a node of this tree. + cellTree []cellIndexNode + + // The last element of rangeNodes is a sentinel value, which is necessary + // in order to represent the range covered by the previous element. + rangeNodes []rangeNode +} + +// Add adds the given CellID and Label to the index. +func (c *CellIndex) Add(id CellID, label int32) { + if label < 0 { + panic("labels must be non-negative") + } + c.cellTree = append(c.cellTree, cellIndexNode{cellID: id, label: label, parent: -1}) +} + +// AddCellUnion adds all of the elements of the given CellUnion to the index with the same label. +func (c *CellIndex) AddCellUnion(cu CellUnion, label int32) { + if label < 0 { + panic("labels must be non-negative") + } + for _, cell := range cu { + c.Add(cell, label) + } +} + +// Build builds the index for use. This method should only be called once. +func (c *CellIndex) Build() { + // To build the cell tree and leaf cell ranges, we maintain a stack of + // (CellID, label) pairs that contain the current leaf cell. This struct + // represents an instruction to push or pop a (cellID, label) pair. + // + // If label >= 0, the (cellID, label) pair is pushed on the stack. + // If CellID == SentinelCellID, a pair is popped from the stack. + // Otherwise the stack is unchanged but a rangeNode is still emitted. + + // delta represents an entry in a stack of (CellID, label) pairs used in the + // construction of the CellIndex structure. + type delta struct { + startID CellID + cellID CellID + label int32 + } + + deltas := make([]delta, 0, 2*len(c.cellTree)+2) + + // Create two deltas for each (cellID, label) pair: one to add the pair to + // the stack (at the start of its leaf cell range), and one to remove it from + // the stack (at the end of its leaf cell range). + for _, node := range c.cellTree { + deltas = append(deltas, delta{ + startID: node.cellID.RangeMin(), + cellID: node.cellID, + label: node.label, + }) + deltas = append(deltas, delta{ + startID: node.cellID.RangeMax().Next(), + cellID: SentinelCellID, + label: -1, + }) + } + + // We also create two special deltas to ensure that a RangeNode is emitted at + // the beginning and end of the CellID range. 
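+	// CellIDFromFace(0).ChildBeginAtLevel(maxLevel) is the first leaf cell on
+	// the sphere and CellIDFromFace(5).ChildEndAtLevel(maxLevel) is one past
+	// the last, so together the two sentinels bracket the full leaf-cell range.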
+	deltas = append(deltas, delta{
+		startID: CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
+		cellID:  CellID(0),
+		label:   -1,
+	})
+	deltas = append(deltas, delta{
+		startID: CellIDFromFace(5).ChildEndAtLevel(maxLevel),
+		cellID:  CellID(0),
+		label:   -1,
+	})
+
+	sort.Slice(deltas, func(i, j int) bool {
+		// deltas are sorted first by startID, then in reverse order by cellID,
+		// and then by label. This is necessary to ensure that (1) larger cells
+		// are pushed on the stack before smaller cells, and (2) cells are popped
+		// off the stack before any new cells are added.
+
+		if si, sj := deltas[i].startID, deltas[j].startID; si != sj {
+			return si < sj
+		}
+		if si, sj := deltas[i].cellID, deltas[j].cellID; si != sj {
+			return si > sj
+		}
+		return deltas[i].label < deltas[j].label
+	})
+
+	// Now walk through the deltas to build the leaf cell ranges and cell tree
+	// (which is essentially a permanent form of the "stack" described above).
+	c.cellTree = nil
+	c.rangeNodes = nil
+	contents := int32(-1)
+	for i := 0; i < len(deltas); {
+		startID := deltas[i].startID
+		// Process all the deltas associated with the current startID.
+		for ; i < len(deltas) && deltas[i].startID == startID; i++ {
+			if deltas[i].label >= 0 {
+				c.cellTree = append(c.cellTree, cellIndexNode{
+					cellID: deltas[i].cellID,
+					label:  deltas[i].label,
+					parent: contents})
+				contents = int32(len(c.cellTree) - 1)
+			} else if deltas[i].cellID == SentinelCellID {
+				contents = c.cellTree[contents].parent
+			}
+		}
+		c.rangeNodes = append(c.rangeNodes, rangeNode{startID, contents})
+	}
+}
+
+// CellVisitor is a callback passed to VisitIntersectingCells. It is invoked
+// with each intersecting (CellID, label) pair; returning false stops the visit.
+type CellVisitor func(CellID, int32) bool
+
+// GetIntersectingLabels returns the sorted, de-duplicated labels of all cells
+// in the index that intersect the target CellUnion.
+func (c *CellIndex) GetIntersectingLabels(target CellUnion) []int32 {
+	var rv []int32
+	c.IntersectingLabels(target, &rv)
+	return rv
+}
+
+// IntersectingLabels appends to labels the label of every cell in the index
+// that intersects the target CellUnion, de-duplicated and sorted.
+func (c *CellIndex) IntersectingLabels(target CellUnion, labels *[]int32) {
+	c.VisitIntersectingCells(target, func(cellID CellID, label int32) bool {
+		*labels = append(*labels, label)
+		return true
+	})
+	dedupe(labels)
+	sort.Slice(*labels, func(i, j int) bool { return (*labels)[i] < (*labels)[j] })
+}
+
+// dedupe removes duplicate values from labels in place. The surviving order is
+// unspecified, so callers are expected to sort afterwards.
+func dedupe(labels *[]int32) {
+	encountered := make(map[int32]struct{})
+
+	for v := range *labels {
+		encountered[(*labels)[v]] = struct{}{}
+	}
+
+	(*labels) = (*labels)[:0]
+	for key := range encountered {
+		*labels = append(*labels, key)
+	}
+}
+
+// VisitIntersectingCells calls visitor on every (CellID, label) pair in the
+// index that intersects the target CellUnion. The visit stops, returning
+// false, as soon as visitor returns false; otherwise true is returned.
+func (c *CellIndex) VisitIntersectingCells(target CellUnion,
+	visitor CellVisitor) bool {
+	if len(target) == 0 {
+		return true
+	}
+
+	var pos int
+	cItr := NewCellIndexContentsIterator(c)
+	rItr := NewCellIndexNonEmptyRangeIterator(c)
+	rItr.Begin()
+	for pos < len(target) {
+		if rItr.LimitID() <= target[pos].RangeMin() {
+			rItr.Seek(target[pos].RangeMin())
+		}

+		for rItr.StartID() <= target[pos].RangeMax() {
+			for cItr.StartUnion(rItr); !cItr.Done(); cItr.Next() {
+				if !visitor(cItr.CellID(), cItr.Label()) {
+					return false
+				}
+			}
+			// Advance to the next non-empty range so that this loop makes
+			// progress toward the end of the current target cell.
+			rItr.Next()
+		}
+
+		pos++
+		if pos < len(target) && target[pos].RangeMax() < rItr.StartID() {
+			pos = target.lowerBound(pos, len(target), rItr.StartID())
+			if target[pos-1].RangeMax() >= rItr.StartID() {
+				pos--
+			}
+		}
+	}
+	return true
+}
+
+// TODO(roberts): Differences from C++
+// IntersectingLabels
+// VisitIntersectingCells
+// CellIndexIterator
diff --git a/vendor/github.com/blevesearch/geo/s2/cellid.go b/vendor/github.com/blevesearch/geo/s2/cellid.go
new file mode 100644
index 0000000..c6cbaf2
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/cellid.go
@@ -0,0 +1,944 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r2"
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+// CellID uniquely identifies a cell in the S2 cell decomposition.
+// The most significant 3 bits encode the face number (0-5). The
+// remaining 61 bits encode the position of the center of this cell
+// along the Hilbert curve on that face. The zero value and the value
+// (1<<64)-1 are invalid cell IDs. The first compares less than any
+// valid cell ID, the second greater than any valid cell ID.
+//
+// Sequentially increasing cell IDs follow a continuous space-filling curve
+// over the entire sphere. They have the following properties:
+//
+// - The ID of a cell at level k consists of a 3-bit face number followed
+//   by k bit pairs that recursively select one of the four children of
+//   each cell. The next bit is always 1, and all other bits are 0.
+//   Therefore, the level of a cell is determined by the position of its
+//   lowest-numbered bit that is turned on (for a cell at level k, this
+//   position is 2 * (maxLevel - k)).
+//
+// - The ID of a parent cell is at the midpoint of the range of IDs spanned
+//   by its children (or by its descendants at any level).
+//
+// Leaf cells are often used to represent points on the unit sphere, and
+// this type provides methods for converting directly between these two
+// representations. For cells that represent 2D regions rather than
+// discrete points, it is better to use Cells.
+type CellID uint64
+
+// SentinelCellID is an invalid cell ID guaranteed to be larger than any
+// valid cell ID. It is used primarily by ShapeIndex. The value is also used
+// by some S2 types when encoding data.
+// Note that the sentinel's RangeMin == RangeMax == itself.
+const SentinelCellID = CellID(^uint64(0))
+
+// sortCellIDs sorts the slice of CellIDs in place.
+func sortCellIDs(ci []CellID) {
+	sort.Sort(cellIDs(ci))
+}
+
+// cellIDs implements the Sort interface for slices of CellIDs.
+type cellIDs []CellID
+
+func (c cellIDs) Len() int           { return len(c) }
+func (c cellIDs) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
+func (c cellIDs) Less(i, j int) bool { return c[i] < c[j] }
+
+// TODO(dsymonds): Some of these constants should probably be exported.
+const (
+	faceBits = 3
+	numFaces = 6
+
+	// This is the number of levels needed to specify a leaf cell.
+	maxLevel = 30
+
+	// The extra position bit (61 rather than 60) lets us encode each cell as its
+	// Hilbert curve position at the cell center (which is halfway along the
+	// portion of the Hilbert curve that fills that cell).
+	posBits = 2*maxLevel + 1
+
+	// The maximum index of a valid leaf cell plus one. The range of valid leaf
+	// cell indices is [0..maxSize-1].
+	maxSize = 1 << maxLevel
+
+	wrapOffset = uint64(numFaces) << posBits
+)
+
+// CellIDFromFacePosLevel returns a cell given its face in the range
+// [0,5], the 61-bit Hilbert curve position pos within that face, and
+// the level in the range [0,maxLevel]. The position in the cell ID
+// will be truncated to correspond to the Hilbert curve position at
+// the center of the returned cell.
+func CellIDFromFacePosLevel(face int, pos uint64, level int) CellID {
+	return CellID(uint64(face)<<posBits+pos|1).Parent(level)
+}
+
+// CellIDFromFace returns the cell corresponding to a given S2 cube face.
+func CellIDFromFace(face int) CellID {
+	return CellID((uint64(face) << posBits) + lsbForLevel(0))
+}
+
+// CellIDFromLatLng returns the leaf cell containing ll.
+func CellIDFromLatLng(ll LatLng) CellID {
+	return cellIDFromPoint(PointFromLatLng(ll))
+}
+
+// CellIDFromToken returns a cell given the token string it represents.
+func CellIDFromToken(s string) CellID {
+	if len(s) > 16 {
+		return CellID(0)
+	}
+	n, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return CellID(0)
+	}
+	// Equivalent to right-padding string with zeros to 16 characters.
+	if len(s) < 16 {
+		n = n << (4 * uint(16-len(s)))
+	}
+	return CellID(n)
+}
+
+// ToToken returns a hex-encoded string of the uint64 cell id, with leading
+// zeros included but trailing zeros stripped.
+func (ci CellID) ToToken() string {
+	s := strings.TrimRight(fmt.Sprintf("%016x", uint64(ci)), "0")
+	if len(s) == 0 {
+		return "X"
+	}
+	return s
+}
+
+// IsValid reports whether ci represents a valid cell.
+func (ci CellID) IsValid() bool {
+	return ci.Face() < numFaces && (ci.lsb()&0x1555555555555555 != 0)
+}
+
+// Face returns the cube face for this cell ID, in the range [0,5].
+func (ci CellID) Face() int { return int(uint64(ci) >> posBits) }
+
+// Pos returns the position along the Hilbert curve of this cell ID, in the range [0,2^posBits-1].
+func (ci CellID) Pos() uint64 { return uint64(ci) & (^uint64(0) >> faceBits) }
+
+// Level returns the subdivision level of this cell ID, in the range [0, maxLevel].
+func (ci CellID) Level() int {
+	return maxLevel - findLSBSetNonZero64(uint64(ci))>>1
+}
+
+// IsLeaf returns whether this cell ID is at the deepest level;
+// that is, the level at which the cells are smallest.
+func (ci CellID) IsLeaf() bool { return uint64(ci)&1 != 0 }
+
+// ChildPosition returns the child position (0..3) of this cell's
+// ancestor at the given level, relative to its parent. The argument
+// should be in the range 1..maxLevel. For example,
+// ChildPosition(1) returns the position of this cell's level-1
+// ancestor within its top-level face cell.
+func (ci CellID) ChildPosition(level int) int {
+	return int(uint64(ci)>>uint64(2*(maxLevel-level)+1)) & 3
+}
+
+// lsbForLevel returns the lowest-numbered bit that is on for cells at the given level.
+func lsbForLevel(level int) uint64 { return 1 << uint64(2*(maxLevel-level)) }
+
+// Parent returns the cell at the given level, which must be no greater than the current level.
+func (ci CellID) Parent(level int) CellID {
+	lsb := lsbForLevel(level)
+	return CellID((uint64(ci) & -lsb) | lsb)
+}
+
+// immediateParent is cheaper than Parent, but assumes !ci.isFace().
+func (ci CellID) immediateParent() CellID {
+	nlsb := CellID(ci.lsb() << 2)
+	return (ci & -nlsb) | nlsb
+}
+
+// isFace returns whether this is a top-level (face) cell.
+func (ci CellID) isFace() bool { return uint64(ci)&(lsbForLevel(0)-1) == 0 }
+
+// lsb returns the least significant bit that is set.
+func (ci CellID) lsb() uint64 { return uint64(ci) & -uint64(ci) }
+
+// Children returns the four immediate children of this cell.
+// If ci is a leaf cell, it returns four identical cells that are not the children.
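+//
+// A sketch of visiting the children of a non-leaf cell (they are returned
+// in Hilbert curve order):
+//
+//	for _, child := range id.Children() {
+//		// process child
+//	}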
+func (ci CellID) Children() [4]CellID { + var ch [4]CellID + lsb := CellID(ci.lsb()) + ch[0] = ci - lsb + lsb>>2 + lsb >>= 1 + ch[1] = ch[0] + lsb + ch[2] = ch[1] + lsb + ch[3] = ch[2] + lsb + return ch +} + +func sizeIJ(level int) int { + return 1 << uint(maxLevel-level) +} + +// EdgeNeighbors returns the four cells that are adjacent across the cell's four edges. +// Edges 0, 1, 2, 3 are in the down, right, up, left directions in the face space. +// All neighbors are guaranteed to be distinct. +func (ci CellID) EdgeNeighbors() [4]CellID { + level := ci.Level() + size := sizeIJ(level) + f, i, j, _ := ci.faceIJOrientation() + return [4]CellID{ + cellIDFromFaceIJWrap(f, i, j-size).Parent(level), + cellIDFromFaceIJWrap(f, i+size, j).Parent(level), + cellIDFromFaceIJWrap(f, i, j+size).Parent(level), + cellIDFromFaceIJWrap(f, i-size, j).Parent(level), + } +} + +// VertexNeighbors returns the neighboring cellIDs with vertex closest to this cell at the given level. +// (Normally there are four neighbors, but the closest vertex may only have three neighbors if it is one of +// the 8 cube vertices.) +func (ci CellID) VertexNeighbors(level int) []CellID { + halfSize := sizeIJ(level + 1) + size := halfSize << 1 + f, i, j, _ := ci.faceIJOrientation() + + var isame, jsame bool + var ioffset, joffset int + if i&halfSize != 0 { + ioffset = size + isame = (i + size) < maxSize + } else { + ioffset = -size + isame = (i - size) >= 0 + } + if j&halfSize != 0 { + joffset = size + jsame = (j + size) < maxSize + } else { + joffset = -size + jsame = (j - size) >= 0 + } + + results := []CellID{ + ci.Parent(level), + cellIDFromFaceIJSame(f, i+ioffset, j, isame).Parent(level), + cellIDFromFaceIJSame(f, i, j+joffset, jsame).Parent(level), + } + + if isame || jsame { + results = append(results, cellIDFromFaceIJSame(f, i+ioffset, j+joffset, isame && jsame).Parent(level)) + } + + return results +} + +// AllNeighbors returns all neighbors of this cell at the given level. Two +// cells X and Y are neighbors if their boundaries intersect but their +// interiors do not. In particular, two cells that intersect at a single +// point are neighbors. Note that for cells adjacent to a face vertex, the +// same neighbor may be returned more than once. There could be up to eight +// neighbors including the diagonal ones that share the vertex. +// +// This requires level >= ci.Level(). +func (ci CellID) AllNeighbors(level int) []CellID { + var neighbors []CellID + + face, i, j, _ := ci.faceIJOrientation() + + // Find the coordinates of the lower left-hand leaf cell. We need to + // normalize (i,j) to a known position within the cell because level + // may be larger than this cell's level. + size := sizeIJ(ci.Level()) + i &= -size + j &= -size + + nbrSize := sizeIJ(level) + + // We compute the top-bottom, left-right, and diagonal neighbors in one + // pass. The loop test is at the end of the loop to avoid 32-bit overflow. + for k := -nbrSize; ; k += nbrSize { + var sameFace bool + if k < 0 { + sameFace = (j+k >= 0) + } else if k >= size { + sameFace = (j+k < maxSize) + } else { + sameFace = true + // Top and bottom neighbors. + neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j-nbrSize, + j-size >= 0).Parent(level)) + neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j+size, + j+size < maxSize).Parent(level)) + } + + // Left, right, and diagonal neighbors. 
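+		// (sameFace above records whether the current j+k strip stays on this
+		// face; a lateral neighbor is on the same face only if its i offset
+		// also stays within [0, maxSize).)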
+ neighbors = append(neighbors, cellIDFromFaceIJSame(face, i-nbrSize, j+k, + sameFace && i-size >= 0).Parent(level)) + neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+size, j+k, + sameFace && i+size < maxSize).Parent(level)) + + if k >= size { + break + } + } + + return neighbors +} + +// RangeMin returns the minimum CellID that is contained within this cell. +func (ci CellID) RangeMin() CellID { return CellID(uint64(ci) - (ci.lsb() - 1)) } + +// RangeMax returns the maximum CellID that is contained within this cell. +func (ci CellID) RangeMax() CellID { return CellID(uint64(ci) + (ci.lsb() - 1)) } + +// Contains returns true iff the CellID contains oci. +func (ci CellID) Contains(oci CellID) bool { + return uint64(ci.RangeMin()) <= uint64(oci) && uint64(oci) <= uint64(ci.RangeMax()) +} + +// Intersects returns true iff the CellID intersects oci. +func (ci CellID) Intersects(oci CellID) bool { + return uint64(oci.RangeMin()) <= uint64(ci.RangeMax()) && uint64(oci.RangeMax()) >= uint64(ci.RangeMin()) +} + +// String returns the string representation of the cell ID in the form "1/3210". +func (ci CellID) String() string { + if !ci.IsValid() { + return "Invalid: " + strconv.FormatInt(int64(ci), 16) + } + var b bytes.Buffer + b.WriteByte("012345"[ci.Face()]) // values > 5 will have been picked off by !IsValid above + b.WriteByte('/') + for level := 1; level <= ci.Level(); level++ { + b.WriteByte("0123"[ci.ChildPosition(level)]) + } + return b.String() +} + +// cellIDFromString returns a CellID from a string in the form "1/3210". +func cellIDFromString(s string) CellID { + level := len(s) - 2 + if level < 0 || level > maxLevel { + return CellID(0) + } + face := int(s[0] - '0') + if face < 0 || face > 5 || s[1] != '/' { + return CellID(0) + } + id := CellIDFromFace(face) + for i := 2; i < len(s); i++ { + childPos := s[i] - '0' + if childPos < 0 || childPos > 3 { + return CellID(0) + } + id = id.Children()[childPos] + } + return id +} + +// Point returns the center of the s2 cell on the sphere as a Point. +// The maximum directional error in Point (compared to the exact +// mathematical result) is 1.5 * dblEpsilon radians, and the maximum length +// error is 2 * dblEpsilon (the same as Normalize). +func (ci CellID) Point() Point { return Point{ci.rawPoint().Normalize()} } + +// LatLng returns the center of the s2 cell on the sphere as a LatLng. +func (ci CellID) LatLng() LatLng { return LatLngFromPoint(Point{ci.rawPoint()}) } + +// ChildBegin returns the first child in a traversal of the children of this cell, in Hilbert curve order. +// +// for ci := c.ChildBegin(); ci != c.ChildEnd(); ci = ci.Next() { +// ... +// } +func (ci CellID) ChildBegin() CellID { + ol := ci.lsb() + return CellID(uint64(ci) - ol + ol>>2) +} + +// ChildBeginAtLevel returns the first cell in a traversal of children a given level deeper than this cell, in +// Hilbert curve order. The given level must be no smaller than the cell's level. +// See ChildBegin for example use. +func (ci CellID) ChildBeginAtLevel(level int) CellID { + return CellID(uint64(ci) - ci.lsb() + lsbForLevel(level)) +} + +// ChildEnd returns the first cell after a traversal of the children of this cell in Hilbert curve order. +// The returned cell may be invalid. +func (ci CellID) ChildEnd() CellID { + ol := ci.lsb() + return CellID(uint64(ci) + ol + ol>>2) +} + +// ChildEndAtLevel returns the first cell after the last child in a traversal of children a given level deeper +// than this cell, in Hilbert curve order. 
+// The given level must be no smaller than the cell's level. +// The returned cell may be invalid. +func (ci CellID) ChildEndAtLevel(level int) CellID { + return CellID(uint64(ci) + ci.lsb() + lsbForLevel(level)) +} + +// Next returns the next cell along the Hilbert curve. +// This is expected to be used with ChildBegin and ChildEnd, +// or ChildBeginAtLevel and ChildEndAtLevel. +func (ci CellID) Next() CellID { + return CellID(uint64(ci) + ci.lsb()<<1) +} + +// Prev returns the previous cell along the Hilbert curve. +func (ci CellID) Prev() CellID { + return CellID(uint64(ci) - ci.lsb()<<1) +} + +// NextWrap returns the next cell along the Hilbert curve, wrapping from last to +// first as necessary. This should not be used with ChildBegin and ChildEnd. +func (ci CellID) NextWrap() CellID { + n := ci.Next() + if uint64(n) < wrapOffset { + return n + } + return CellID(uint64(n) - wrapOffset) +} + +// PrevWrap returns the previous cell along the Hilbert curve, wrapping around from +// first to last as necessary. This should not be used with ChildBegin and ChildEnd. +func (ci CellID) PrevWrap() CellID { + p := ci.Prev() + if uint64(p) < wrapOffset { + return p + } + return CellID(uint64(p) + wrapOffset) +} + +// AdvanceWrap advances or retreats the indicated number of steps along the +// Hilbert curve at the current level and returns the new position. The +// position wraps between the first and last faces as necessary. +func (ci CellID) AdvanceWrap(steps int64) CellID { + if steps == 0 { + return ci + } + + // We clamp the number of steps if necessary to ensure that we do not + // advance past the End() or before the Begin() of this level. + shift := uint(2*(maxLevel-ci.Level()) + 1) + if steps < 0 { + if min := -int64(uint64(ci) >> shift); steps < min { + wrap := int64(wrapOffset >> shift) + steps %= wrap + if steps < min { + steps += wrap + } + } + } else { + // Unlike Advance(), we don't want to return End(level). + if max := int64((wrapOffset - uint64(ci)) >> shift); steps > max { + wrap := int64(wrapOffset >> shift) + steps %= wrap + if steps > max { + steps -= wrap + } + } + } + + // If steps is negative, then shifting it left has undefined behavior. + // Cast to uint64 for a 2's complement answer. + return CellID(uint64(ci) + (uint64(steps) << shift)) +} + +// Encode encodes the CellID. +func (ci CellID) Encode(w io.Writer) error { + e := &encoder{w: w} + ci.encode(e) + return e.err +} + +func (ci CellID) encode(e *encoder) { + e.writeUint64(uint64(ci)) +} + +// Decode decodes the CellID. +func (ci *CellID) Decode(r io.Reader) error { + d := &decoder{r: asByteReader(r)} + ci.decode(d) + return d.err +} + +func (ci *CellID) decode(d *decoder) { + *ci = CellID(d.readUint64()) +} + +// TODO: the methods below are not exported yet. Settle on the entire API design +// before doing this. Do we want to mirror the C++ one as closely as possible? + +// distanceFromBegin returns the number of steps along the Hilbert curve that +// this cell is from the first node in the S2 hierarchy at our level. (i.e., +// FromFace(0).ChildBeginAtLevel(ci.Level())). This is analogous to Pos(), but +// for this cell's level. +// The return value is always non-negative. +func (ci CellID) distanceFromBegin() int64 { + return int64(ci >> uint64(2*(maxLevel-ci.Level())+1)) +} + +// rawPoint returns an unnormalized r3 vector from the origin through the center +// of the s2 cell on the sphere. 
+func (ci CellID) rawPoint() r3.Vector { + face, si, ti := ci.faceSiTi() + return faceUVToXYZ(face, stToUV((0.5/maxSize)*float64(si)), stToUV((0.5/maxSize)*float64(ti))) +} + +// faceSiTi returns the Face/Si/Ti coordinates of the center of the cell. +func (ci CellID) faceSiTi() (face int, si, ti uint32) { + face, i, j, _ := ci.faceIJOrientation() + delta := 0 + if ci.IsLeaf() { + delta = 1 + } else { + if (i^(int(ci)>>2))&1 != 0 { + delta = 2 + } + } + return face, uint32(2*i + delta), uint32(2*j + delta) +} + +// faceIJOrientation uses the global lookupIJ table to unfiddle the bits of ci. +func (ci CellID) faceIJOrientation() (f, i, j, orientation int) { + f = ci.Face() + orientation = f & swapMask + nbits := maxLevel - 7*lookupBits // first iteration + + // Each iteration maps 8 bits of the Hilbert curve position into + // 4 bits of "i" and "j". The lookup table transforms a key of the + // form "ppppppppoo" to a value of the form "iiiijjjjoo", where the + // letters [ijpo] represents bits of "i", "j", the Hilbert curve + // position, and the Hilbert curve orientation respectively. + // + // On the first iteration we need to be careful to clear out the bits + // representing the cube face. + for k := 7; k >= 0; k-- { + orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint(2*nbits)) - 1)) << 2 + orientation = lookupIJ[orientation] + i += (orientation >> (lookupBits + 2)) << uint(k*lookupBits) + j += ((orientation >> 2) & ((1 << lookupBits) - 1)) << uint(k*lookupBits) + orientation &= (swapMask | invertMask) + nbits = lookupBits // following iterations + } + + // The position of a non-leaf cell at level "n" consists of a prefix of + // 2*n bits that identifies the cell, followed by a suffix of + // 2*(maxLevel-n)+1 bits of the form 10*. If n==maxLevel, the suffix is + // just "1" and has no effect. Otherwise, it consists of "10", followed + // by (maxLevel-n-1) repetitions of "00", followed by "0". The "10" has + // no effect, while each occurrence of "00" has the effect of reversing + // the swapMask bit. + if ci.lsb()&0x1111111111111110 != 0 { + orientation ^= swapMask + } + + return +} + +// cellIDFromFaceIJ returns a leaf cell given its cube face (range 0..5) and IJ coordinates. +func cellIDFromFaceIJ(f, i, j int) CellID { + // Note that this value gets shifted one bit to the left at the end + // of the function. + n := uint64(f) << (posBits - 1) + // Alternating faces have opposite Hilbert curve orientations; this + // is necessary in order for all faces to have a right-handed + // coordinate system. + bits := f & swapMask + // Each iteration maps 4 bits of "i" and "j" into 8 bits of the Hilbert + // curve position. The lookup table transforms a 10-bit key of the form + // "iiiijjjjoo" to a 10-bit value of the form "ppppppppoo", where the + // letters [ijpo] denote bits of "i", "j", Hilbert curve position, and + // Hilbert curve orientation respectively. + for k := 7; k >= 0; k-- { + mask := (1 << lookupBits) - 1 + bits += ((i >> uint(k*lookupBits)) & mask) << (lookupBits + 2) + bits += ((j >> uint(k*lookupBits)) & mask) << 2 + bits = lookupPos[bits] + n |= uint64(bits>>2) << (uint(k) * 2 * lookupBits) + bits &= (swapMask | invertMask) + } + return CellID(n*2 + 1) +} + +func cellIDFromFaceIJWrap(f, i, j int) CellID { + // Convert i and j to the coordinates of a leaf cell just beyond the + // boundary of this face. This prevents 32-bit overflow in the case + // of finding the neighbors of a face cell. 
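+	// (Neighbor computations may pass in i or j up to one full face size out
+	// of range; clamping first bounds the values used in the conversion below.)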
+ i = clampInt(i, -1, maxSize) + j = clampInt(j, -1, maxSize) + + // We want to wrap these coordinates onto the appropriate adjacent face. + // The easiest way to do this is to convert the (i,j) coordinates to (x,y,z) + // (which yields a point outside the normal face boundary), and then call + // xyzToFaceUV to project back onto the correct face. + // + // The code below converts (i,j) to (si,ti), and then (si,ti) to (u,v) using + // the linear projection (u=2*s-1 and v=2*t-1). (The code further below + // converts back using the inverse projection, s=0.5*(u+1) and t=0.5*(v+1). + // Any projection would work here, so we use the simplest.) We also clamp + // the (u,v) coordinates so that the point is barely outside the + // [-1,1]x[-1,1] face rectangle, since otherwise the reprojection step + // (which divides by the new z coordinate) might change the other + // coordinates enough so that we end up in the wrong leaf cell. + const scale = 1.0 / maxSize + limit := math.Nextafter(1, 2) + u := math.Max(-limit, math.Min(limit, scale*float64((i<<1)+1-maxSize))) + v := math.Max(-limit, math.Min(limit, scale*float64((j<<1)+1-maxSize))) + + // Find the leaf cell coordinates on the adjacent face, and convert + // them to a cell id at the appropriate level. + f, u, v = xyzToFaceUV(faceUVToXYZ(f, u, v)) + return cellIDFromFaceIJ(f, stToIJ(0.5*(u+1)), stToIJ(0.5*(v+1))) +} + +func cellIDFromFaceIJSame(f, i, j int, sameFace bool) CellID { + if sameFace { + return cellIDFromFaceIJ(f, i, j) + } + return cellIDFromFaceIJWrap(f, i, j) +} + +// ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding +// s- or t-value contained by that cell. The argument must be in the range +// [0..2**30], i.e. up to one position beyond the normal range of valid leaf +// cell indices. +func ijToSTMin(i int) float64 { + return float64(i) / float64(maxSize) +} + +// stToIJ converts value in ST coordinates to a value in IJ coordinates. +func stToIJ(s float64) int { + return clampInt(int(math.Floor(maxSize*s)), 0, maxSize-1) +} + +// cellIDFromPoint returns a leaf cell containing point p. Usually there is +// exactly one such cell, but for points along the edge of a cell, any +// adjacent cell may be (deterministically) chosen. This is because +// s2.CellIDs are considered to be closed sets. The returned cell will +// always contain the given point, i.e. +// +// CellFromPoint(p).ContainsPoint(p) +// +// is always true. +func cellIDFromPoint(p Point) CellID { + f, u, v := xyzToFaceUV(r3.Vector{p.X, p.Y, p.Z}) + i := stToIJ(uvToST(u)) + j := stToIJ(uvToST(v)) + return cellIDFromFaceIJ(f, i, j) +} + +// ijLevelToBoundUV returns the bounds in (u,v)-space for the cell at the given +// level containing the leaf cell with the given (i,j)-coordinates. +func ijLevelToBoundUV(i, j, level int) r2.Rect { + cellSize := sizeIJ(level) + xLo := i & -cellSize + yLo := j & -cellSize + + return r2.Rect{ + X: r1.Interval{ + Lo: stToUV(ijToSTMin(xLo)), + Hi: stToUV(ijToSTMin(xLo + cellSize)), + }, + Y: r1.Interval{ + Lo: stToUV(ijToSTMin(yLo)), + Hi: stToUV(ijToSTMin(yLo + cellSize)), + }, + } +} + +// Constants related to the bit mangling in the Cell ID. +const ( + lookupBits = 4 + swapMask = 0x01 + invertMask = 0x02 +) + +// The following lookup tables are used to convert efficiently between an +// (i,j) cell index and the corresponding position along the Hilbert curve. 
+// +// lookupPos maps 4 bits of "i", 4 bits of "j", and 2 bits representing the +// orientation of the current cell into 8 bits representing the order in which +// that subcell is visited by the Hilbert curve, plus 2 bits indicating the +// new orientation of the Hilbert curve within that subcell. (Cell +// orientations are represented as combination of swapMask and invertMask.) +// +// lookupIJ is an inverted table used for mapping in the opposite +// direction. +// +// We also experimented with looking up 16 bits at a time (14 bits of position +// plus 2 of orientation) but found that smaller lookup tables gave better +// performance. (2KB fits easily in the primary cache.) +var ( + ijToPos = [4][4]int{ + {0, 1, 3, 2}, // canonical order + {0, 3, 1, 2}, // axes swapped + {2, 3, 1, 0}, // bits inverted + {2, 1, 3, 0}, // swapped & inverted + } + posToIJ = [4][4]int{ + {0, 1, 3, 2}, // canonical order: (0,0), (0,1), (1,1), (1,0) + {0, 2, 3, 1}, // axes swapped: (0,0), (1,0), (1,1), (0,1) + {3, 2, 0, 1}, // bits inverted: (1,1), (1,0), (0,0), (0,1) + {3, 1, 0, 2}, // swapped & inverted: (1,1), (0,1), (0,0), (1,0) + } + posToOrientation = [4]int{swapMask, 0, 0, invertMask | swapMask} + lookupIJ [1 << (2*lookupBits + 2)]int + lookupPos [1 << (2*lookupBits + 2)]int +) + +func init() { + initLookupCell(0, 0, 0, 0, 0, 0) + initLookupCell(0, 0, 0, swapMask, 0, swapMask) + initLookupCell(0, 0, 0, invertMask, 0, invertMask) + initLookupCell(0, 0, 0, swapMask|invertMask, 0, swapMask|invertMask) +} + +// initLookupCell initializes the lookupIJ table at init time. +func initLookupCell(level, i, j, origOrientation, pos, orientation int) { + if level == lookupBits { + ij := (i << lookupBits) + j + lookupPos[(ij<<2)+origOrientation] = (pos << 2) + orientation + lookupIJ[(pos<<2)+origOrientation] = (ij << 2) + orientation + return + } + + level++ + i <<= 1 + j <<= 1 + pos <<= 2 + r := posToIJ[orientation] + initLookupCell(level, i+(r[0]>>1), j+(r[0]&1), origOrientation, pos, orientation^posToOrientation[0]) + initLookupCell(level, i+(r[1]>>1), j+(r[1]&1), origOrientation, pos+1, orientation^posToOrientation[1]) + initLookupCell(level, i+(r[2]>>1), j+(r[2]&1), origOrientation, pos+2, orientation^posToOrientation[2]) + initLookupCell(level, i+(r[3]>>1), j+(r[3]&1), origOrientation, pos+3, orientation^posToOrientation[3]) +} + +// CommonAncestorLevel returns the level of the common ancestor of the two S2 CellIDs. +func (ci CellID) CommonAncestorLevel(other CellID) (level int, ok bool) { + bits := uint64(ci ^ other) + if bits < ci.lsb() { + bits = ci.lsb() + } + if bits < other.lsb() { + bits = other.lsb() + } + + msbPos := findMSBSetNonZero64(bits) + if msbPos > 60 { + return 0, false + } + return (60 - msbPos) >> 1, true +} + +// Advance advances or retreats the indicated number of steps along the +// Hilbert curve at the current level, and returns the new position. The +// position is never advanced past End() or before Begin(). +func (ci CellID) Advance(steps int64) CellID { + if steps == 0 { + return ci + } + + // We clamp the number of steps if necessary to ensure that we do not + // advance past the End() or before the Begin() of this level. Note that + // minSteps and maxSteps always fit in a signed 64-bit integer. 
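+	// One step along the curve at this level changes the ID by 2*lsb, that is,
+	// by 1<<stepShift as computed below, so the clamping works in those units.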
+	stepShift := uint(2*(maxLevel-ci.Level()) + 1)
+	if steps < 0 {
+		minSteps := -int64(uint64(ci) >> stepShift)
+		if steps < minSteps {
+			steps = minSteps
+		}
+	} else {
+		maxSteps := int64((wrapOffset + ci.lsb() - uint64(ci)) >> stepShift)
+		if steps > maxSteps {
+			steps = maxSteps
+		}
+	}
+	return ci + CellID(steps)<<stepShift
+}
+
+// MaxTile returns the largest cell with the same RangeMin such that
+// RangeMax < limit.RangeMin. It returns limit if no such cell exists.
+// It can be used to generate a small set of CellIDs that covers a given
+// range of leaf cells (a "tiling"); see CellUnionFromRange.
+func (ci CellID) MaxTile(limit CellID) CellID {
+	start := ci.RangeMin()
+	if start >= limit.RangeMin() {
+		return limit
+	}
+
+	if ci.RangeMax() >= limit {
+		// The cell is too large, shrink it. Note that when generating coverings
+		// of CellID ranges, this loop usually executes only once. Also because
+		// ci.RangeMin() < limit.RangeMin(), we will always exit the loop by the
+		// time we reach a leaf cell.
+		for {
+			ci = ci.Children()[0]
+			if ci.RangeMax() < limit {
+				break
+			}
+		}
+		return ci
+	}
+
+	// The cell may be too small. Grow it if necessary. Note that generally
+	// this loop only iterates once.
+	for !ci.isFace() {
+		parent := ci.immediateParent()
+		if parent.RangeMin() != start || parent.RangeMax() >= limit {
+			break
+		}
+		ci = parent
+	}
+	return ci
+}
+
+// centerFaceSiTi returns the (face, si, ti) coordinates of the center of the cell.
+// Note that although (si,ti) coordinates span the range [0,2**31] in general,
+// the cell center coordinates are always in the range [1,2**31-1] and
+// therefore can be represented using a signed 32-bit integer.
+func (ci CellID) centerFaceSiTi() (face, si, ti int) {
+	// First we compute the discrete (i,j) coordinates of a leaf cell contained
+	// within the given cell. Given that cells are represented by the Hilbert
+	// curve position corresponding to their center, it turns out that the cell
+	// returned by faceIJOrientation is always one of two leaf cells closest
+	// to the center of the cell (unless the given cell is a leaf cell itself,
+	// in which case there is only one possibility).
+	//
+	// Given a cell of size s >= 2 (i.e. not a leaf cell), and letting (imin,
+	// jmin) be the coordinates of its lower left-hand corner, the leaf cell
+	// returned by faceIJOrientation is either (imin + s/2, jmin + s/2) or
+	// (imin + s/2 - 1, jmin + s/2 - 1). The first case is the one we want.
+	// We can distinguish these two cases by looking at the low bit of i or
+	// j. In the second case the low bit is one, unless s == 2 (i.e. the
+	// level just above leaf cells) in which case the low bit is zero.
+	//
+	// In the code below, the expression ((i ^ (int(id) >> 2)) & 1) is true
+	// if we are in the second case described above.
+	face, i, j, _ := ci.faceIJOrientation()
+	delta := 0
+	if ci.IsLeaf() {
+		delta = 1
+	} else if (int64(i)^(int64(ci)>>2))&1 == 1 {
+		delta = 2
+	}
+
+	// Note that (2 * {i,j} + delta) will never overflow a 32-bit integer.
+	return face, 2*i + delta, 2*j + delta
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/cellunion.go b/vendor/github.com/blevesearch/geo/s2/cellunion.go
new file mode 100644
index 0000000..0654de9
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/cellunion.go
@@ -0,0 +1,590 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + "io" + "sort" + + "github.com/golang/geo/s1" +) + +// A CellUnion is a collection of CellIDs. +// +// It is normalized if it is sorted, and does not contain redundancy. +// Specifically, it may not contain the same CellID twice, nor a CellID that +// is contained by another, nor the four sibling CellIDs that are children of +// a single higher level CellID. +// +// CellUnions are not required to be normalized, but certain operations will +// return different results if they are not (e.g. Contains). +type CellUnion []CellID + +// CellUnionFromRange creates a CellUnion that covers the half-open range +// of leaf cells [begin, end). If begin == end the resulting union is empty. +// This requires that begin and end are both leaves, and begin <= end. +// To create a closed-ended range, pass in end.Next(). +func CellUnionFromRange(begin, end CellID) CellUnion { + // We repeatedly add the largest cell we can. + var cu CellUnion + for id := begin.MaxTile(end); id != end; id = id.Next().MaxTile(end) { + cu = append(cu, id) + } + // The output is normalized because the cells are added in order by the iteration. + return cu +} + +// CellUnionFromUnion creates a CellUnion from the union of the given CellUnions. +func CellUnionFromUnion(cellUnions ...CellUnion) CellUnion { + var cu CellUnion + for _, cellUnion := range cellUnions { + cu = append(cu, cellUnion...) + } + cu.Normalize() + return cu +} + +// CellUnionFromIntersection creates a CellUnion from the intersection of the given CellUnions. +func CellUnionFromIntersection(x, y CellUnion) CellUnion { + var cu CellUnion + + // This is a fairly efficient calculation that uses binary search to skip + // over sections of both input vectors. It takes constant time if all the + // cells of x come before or after all the cells of y in CellID order. + var i, j int + for i < len(x) && j < len(y) { + iMin := x[i].RangeMin() + jMin := y[j].RangeMin() + if iMin > jMin { + // Either j.Contains(i) or the two cells are disjoint. + if x[i] <= y[j].RangeMax() { + cu = append(cu, x[i]) + i++ + } else { + // Advance j to the first cell possibly contained by x[i]. + j = y.lowerBound(j+1, len(y), iMin) + // The previous cell y[j-1] may now contain x[i]. + if x[i] <= y[j-1].RangeMax() { + j-- + } + } + } else if jMin > iMin { + // Identical to the code above with i and j reversed. + if y[j] <= x[i].RangeMax() { + cu = append(cu, y[j]) + j++ + } else { + i = x.lowerBound(i+1, len(x), jMin) + if y[j] <= x[i-1].RangeMax() { + i-- + } + } + } else { + // i and j have the same RangeMin(), so one contains the other. + if x[i] < y[j] { + cu = append(cu, x[i]) + i++ + } else { + cu = append(cu, y[j]) + j++ + } + } + } + + // The output is generated in sorted order. + cu.Normalize() + return cu +} + +// CellUnionFromIntersectionWithCellID creates a CellUnion from the intersection +// of a CellUnion with the given CellID. This can be useful for splitting a +// CellUnion into chunks. +func CellUnionFromIntersectionWithCellID(x CellUnion, id CellID) CellUnion { + var cu CellUnion + if x.ContainsCellID(id) { + cu = append(cu, id) + cu.Normalize() + return cu + } + + idmax := id.RangeMax() + for i := x.lowerBound(0, len(x), id.RangeMin()); i < len(x) && x[i] <= idmax; i++ { + cu = append(cu, x[i]) + } + + cu.Normalize() + return cu +} + +// CellUnionFromDifference creates a CellUnion from the difference (x - y) +// of the given CellUnions. 
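+//
+// For example (a sketch; covering and hole are CellUnions built elsewhere):
+//
+//	remaining := CellUnionFromDifference(covering, hole)
+//	// remaining now covers the leaf cells in covering but not in hole.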
+func CellUnionFromDifference(x, y CellUnion) CellUnion {
+	// TODO(roberts): This is approximately O(N*log(N)), but could probably
+	// use similar techniques as CellUnionFromIntersectionWithCellID to be more efficient.
+
+	var cu CellUnion
+	for _, xid := range x {
+		cu.cellUnionDifferenceInternal(xid, &y)
+	}
+
+	// The output is generated in sorted order, and there should not be any
+	// cells that can be merged (provided that both inputs were normalized).
+	return cu
+}
+
+// The C++ constructor methods FromNormalized and FromVerbatim are not necessary
+// since they don't call Normalize, and just set the CellIDs directly on the object,
+// so straight casting is sufficient in Go to replicate this behavior.
+
+// IsValid reports whether the cell union is valid, meaning that the CellIDs are
+// valid, non-overlapping, and sorted in increasing order.
+func (cu *CellUnion) IsValid() bool {
+	for i, cid := range *cu {
+		if !cid.IsValid() {
+			return false
+		}
+		if i == 0 {
+			continue
+		}
+		if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
+			return false
+		}
+	}
+	return true
+}
+
+// IsNormalized reports whether the cell union is normalized, meaning that it
+// satisfies IsValid and that no four cells have a common parent.
+// Certain operations such as Contains will return a different
+// result if the cell union is not normalized.
+func (cu *CellUnion) IsNormalized() bool {
+	for i, cid := range *cu {
+		if !cid.IsValid() {
+			return false
+		}
+		if i == 0 {
+			continue
+		}
+		if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
+			return false
+		}
+		if i < 3 {
+			continue
+		}
+		if areSiblings((*cu)[i-3], (*cu)[i-2], (*cu)[i-1], cid) {
+			return false
+		}
+	}
+	return true
+}
+
+// Normalize normalizes the CellUnion.
+func (cu *CellUnion) Normalize() {
+	sortCellIDs(*cu)
+
+	output := make([]CellID, 0, len(*cu)) // the list of accepted cells
+	// Loop invariant: output is a sorted list of cells with no redundancy.
+	for _, ci := range *cu {
+		// The first two passes here either ignore this new candidate,
+		// or remove previously accepted cells that are covered by this candidate.
+
+		// Ignore this cell if it is contained by the previous one.
+		// We only need to check the last accepted cell. The ordering of the
+		// cells implies containment (but not the converse), and output has no redundancy,
+		// so if this candidate is not contained by the last accepted cell
+		// then it cannot be contained by any previously accepted cell.
+		if len(output) > 0 && output[len(output)-1].Contains(ci) {
+			continue
+		}
+
+		// Discard any previously accepted cells contained by this one.
+		// This could be any contiguous trailing subsequence, but it can't be
+		// a discontiguous subsequence because of the containment property of
+		// sorted S2 cells mentioned above.
+		j := len(output) - 1 // last index to keep
+		for j >= 0 {
+			if !ci.Contains(output[j]) {
+				break
+			}
+			j--
+		}
+		output = output[:j+1]
+
+		// See if the last three cells plus this one can be collapsed.
+		// We loop because collapsing three accepted cells and adding a higher level cell
+		// could cascade into previously accepted cells.
+		for len(output) >= 3 && areSiblings(output[len(output)-3], output[len(output)-2], output[len(output)-1], ci) {
+			// Replace four children by their parent cell.
+			output = output[:len(output)-3]
+			ci = ci.immediateParent() // checked !ci.isFace above
+		}
+		output = append(output, ci)
+	}
+	*cu = output
+}
+
+// IntersectsCellID reports whether this CellUnion intersects the given cell ID.
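+// Like ContainsCellID below, this performs a binary search, so it assumes
+// the union's CellIDs are sorted (as produced by Normalize).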
+func (cu *CellUnion) IntersectsCellID(id CellID) bool {
+	// Find index of array item that occurs directly after our probe cell:
+	i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
+
+	if i != len(*cu) && (*cu)[i].RangeMin() <= id.RangeMax() {
+		return true
+	}
+	return i != 0 && (*cu)[i-1].RangeMax() >= id.RangeMin()
+}
+
+// ContainsCellID reports whether the CellUnion contains the given cell ID.
+// Containment is defined with respect to regions, e.g. a cell contains its 4 children.
+//
+// CAVEAT: If you have constructed a non-normalized CellUnion, note that groups
+// of 4 child cells are *not* considered to contain their parent cell. To get
+// this behavior you must call Normalize() explicitly.
+func (cu *CellUnion) ContainsCellID(id CellID) bool {
+	// Find index of array item that occurs directly after our probe cell:
+	i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
+
+	if i != len(*cu) && (*cu)[i].RangeMin() <= id {
+		return true
+	}
+	return i != 0 && (*cu)[i-1].RangeMax() >= id
+}
+
+// Denormalize replaces this CellUnion with an expanded version of the
+// CellUnion where any cell whose level is less than minLevel or where
+// (level - minLevel) is not a multiple of levelMod is replaced by its
+// children, until either both of these conditions are satisfied or the
+// maximum level is reached.
+func (cu *CellUnion) Denormalize(minLevel, levelMod int) {
+	var denorm CellUnion
+	for _, id := range *cu {
+		level := id.Level()
+		newLevel := level
+		if newLevel < minLevel {
+			newLevel = minLevel
+		}
+		if levelMod > 1 {
+			newLevel += (maxLevel - (newLevel - minLevel)) % levelMod
+			if newLevel > maxLevel {
+				newLevel = maxLevel
+			}
+		}
+		if newLevel == level {
+			denorm = append(denorm, id)
+		} else {
+			end := id.ChildEndAtLevel(newLevel)
+			for ci := id.ChildBeginAtLevel(newLevel); ci != end; ci = ci.Next() {
+				denorm = append(denorm, ci)
+			}
+		}
+	}
+	*cu = denorm
+}
+
+// RectBound returns a Rect that bounds this entity.
+func (cu *CellUnion) RectBound() Rect {
+	bound := EmptyRect()
+	for _, c := range *cu {
+		bound = bound.Union(CellFromCellID(c).RectBound())
+	}
+	return bound
+}
+
+// CapBound returns a Cap that bounds this entity.
+func (cu *CellUnion) CapBound() Cap {
+	if len(*cu) == 0 {
+		return EmptyCap()
+	}
+
+	// Compute the approximate centroid of the region. This won't produce the
+	// bounding cap of minimal area, but it should be close enough.
+	var centroid Point
+
+	for _, ci := range *cu {
+		area := AvgAreaMetric.Value(ci.Level())
+		centroid = Point{centroid.Add(ci.Point().Mul(area))}
+	}
+
+	if zero := (Point{}); centroid == zero {
+		centroid = PointFromCoords(1, 0, 0)
+	} else {
+		centroid = Point{centroid.Normalize()}
+	}
+
+	// Use the centroid as the cap axis, and expand the cap angle so that it
+	// contains the bounding caps of all the individual cells. Note that it is
+	// *not* sufficient to just bound all the cell vertices because the bounding
+	// cap may be concave (i.e. cover more than one hemisphere).
+	c := CapFromPoint(centroid)
+	for _, ci := range *cu {
+		c = c.AddCap(CellFromCellID(ci).CapBound())
+	}
+
+	return c
+}
+
+// ContainsCell reports whether this cell union contains the given cell.
+func (cu *CellUnion) ContainsCell(c Cell) bool {
+	return cu.ContainsCellID(c.id)
+}
+
+// IntersectsCell reports whether this cell union intersects the given cell.
+func (cu *CellUnion) IntersectsCell(c Cell) bool {
+	return cu.IntersectsCellID(c.id)
+}
+
+// ContainsPoint reports whether this cell union contains the given point.
+func (cu *CellUnion) ContainsPoint(p Point) bool {
+	return cu.ContainsCell(CellFromPoint(p))
+}
+
+// CellUnionBound computes a covering of the CellUnion.
+func (cu *CellUnion) CellUnionBound() []CellID {
+	return cu.CapBound().CellUnionBound()
+}
+
+// LeafCellsCovered reports the number of leaf cells covered by this cell union.
+// This will be no more than 6*2^60 for the whole sphere.
+func (cu *CellUnion) LeafCellsCovered() int64 {
+	var numLeaves int64
+	for _, c := range *cu {
+		numLeaves += 1 << uint64((maxLevel-int64(c.Level()))<<1)
+	}
+	return numLeaves
+}
+
+// areSiblings reports whether the given four cells have a common parent.
+// This requires that the four CellIDs are distinct.
+func areSiblings(a, b, c, d CellID) bool {
+	// A necessary (but not sufficient) condition is that the XOR of the
+	// four cell IDs must be zero. This is also very fast to test.
+	if (a ^ b ^ c) != d {
+		return false
+	}
+
+	// Now we do a slightly more expensive but exact test. First, compute a
+	// mask that blocks out the two bits that encode the child position of
+	// "id" with respect to its parent, then check that the other three
+	// children all agree with "mask".
+	mask := d.lsb() << 1
+	mask = ^(mask + (mask << 1))
+	idMasked := (uint64(d) & mask)
+	return ((uint64(a)&mask) == idMasked &&
+		(uint64(b)&mask) == idMasked &&
+		(uint64(c)&mask) == idMasked &&
+		!d.isFace())
+}
+
+// Contains reports whether this CellUnion contains all of the CellIDs of the given CellUnion.
+func (cu *CellUnion) Contains(o CellUnion) bool {
+	// TODO(roberts): Investigate alternatives such as divide-and-conquer
+	// or alternating-skip-search that may be significantly faster in both
+	// the average and worst case. This applies to Intersects as well.
+	for _, id := range o {
+		if !cu.ContainsCellID(id) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Intersects reports whether this CellUnion intersects any of the CellIDs of the given CellUnion.
+func (cu *CellUnion) Intersects(o CellUnion) bool {
+	for _, c := range *cu {
+		if o.IntersectsCellID(c) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// lowerBound returns the index in this CellUnion to the first element whose value
+// is not considered to go before the given cell id. (i.e., either it is equivalent
+// or comes after the given id.) If there is no match, then end is returned.
+func (cu *CellUnion) lowerBound(begin, end int, id CellID) int {
+	for i := begin; i < end; i++ {
+		if (*cu)[i] >= id {
+			return i
+		}
+	}
+
+	return end
+}
+
+// cellUnionDifferenceInternal adds the difference between the CellID and the union to
+// the result CellUnion. If they intersect but the difference is non-empty, it divides
+// and conquers.
+func (cu *CellUnion) cellUnionDifferenceInternal(id CellID, other *CellUnion) {
+	if !other.IntersectsCellID(id) {
+		(*cu) = append((*cu), id)
+		return
+	}
+
+	if !other.ContainsCellID(id) {
+		for _, child := range id.Children() {
+			cu.cellUnionDifferenceInternal(child, other)
+		}
+	}
+}
+
+// ExpandAtLevel expands this CellUnion by adding a rim of cells at expandLevel
+// around the union's boundary.
+//
+// For each cell c in the union, we add all cells at level
+// expandLevel that abut c. There are typically eight of those
+// (four edge-abutting and four sharing a vertex). However, if c is
+// finer than expandLevel, we add all cells abutting
+// c.Parent(expandLevel) as well as c.Parent(expandLevel) itself,
+// as an expandLevel cell rarely abuts a smaller cell.
+//
+// Note that the size of the output is exponential in
+// expandLevel. For example, if expandLevel == 20 and the input
+// has a cell at level 10, there will be on the order of 4000
+// adjacent cells in the output. For most applications the
+// ExpandByRadius method below is easier to use.
+func (cu *CellUnion) ExpandAtLevel(level int) {
+	var output CellUnion
+	levelLsb := lsbForLevel(level)
+	for i := len(*cu) - 1; i >= 0; i-- {
+		id := (*cu)[i]
+		if id.lsb() < levelLsb {
+			id = id.Parent(level)
+			// Optimization: skip over any cells contained by this one. This is
+			// especially important when very small regions are being expanded.
+			for i > 0 && id.Contains((*cu)[i-1]) {
+				i--
+			}
+		}
+		output = append(output, id)
+		output = append(output, id.AllNeighbors(level)...)
+	}
+	sortCellIDs(output)
+
+	*cu = output
+	cu.Normalize()
+}
+
+// ExpandByRadius expands this CellUnion such that it contains all points whose
+// distance to the CellUnion is at most minRadius, without using cells that
+// are more than maxLevelDiff levels higher than the largest cell in the input.
+// The second parameter controls the tradeoff between accuracy and output size
+// when a large region is being expanded by a small amount (e.g. expanding Canada
+// by 1km). For example, if maxLevelDiff == 4 the region will always be expanded
+// by approximately 1/16 the width of its largest cell. Note that in the worst case,
+// the number of cells in the output can be up to 4 * (1 + 2 ** maxLevelDiff) times
+// larger than the number of cells in the input.
+func (cu *CellUnion) ExpandByRadius(minRadius s1.Angle, maxLevelDiff int) {
+	minLevel := maxLevel
+	for _, cid := range *cu {
+		minLevel = minInt(minLevel, cid.Level())
+	}
+
+	// Find the maximum level such that all cells are at least "minRadius" wide.
+	radiusLevel := MinWidthMetric.MaxLevel(minRadius.Radians())
+	if radiusLevel == 0 && minRadius.Radians() > MinWidthMetric.Value(0) {
+		// The requested expansion is greater than the width of a face cell.
+		// The easiest way to handle this is to expand twice.
+		cu.ExpandAtLevel(0)
+	}
+	cu.ExpandAtLevel(minInt(minLevel+maxLevelDiff, radiusLevel))
+}
+
+// Equal reports whether the two CellUnions are equal.
+func (cu CellUnion) Equal(o CellUnion) bool {
+	if len(cu) != len(o) {
+		return false
+	}
+	for i := 0; i < len(cu); i++ {
+		if cu[i] != o[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// AverageArea returns the average area of this CellUnion.
+// This is accurate to within a factor of 1.7.
+func (cu *CellUnion) AverageArea() float64 {
+	return AvgAreaMetric.Value(maxLevel) * float64(cu.LeafCellsCovered())
+}
+
+// ApproxArea returns the approximate area of this CellUnion. This method is accurate
+// to within 3% for all cell sizes and accurate to within 0.1% for cells
+// at level 5 or higher within the union.
+func (cu *CellUnion) ApproxArea() float64 {
+	var area float64
+	for _, id := range *cu {
+		area += CellFromCellID(id).ApproxArea()
+	}
+	return area
+}
+
+// ExactArea returns the area of this CellUnion as accurately as possible.
+func (cu *CellUnion) ExactArea() float64 {
+	var area float64
+	for _, id := range *cu {
+		area += CellFromCellID(id).ExactArea()
+	}
+	return area
+}
+
+// Encode encodes the CellUnion.
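+//
+// A sketch of round-tripping a union through its binary encoding:
+//
+//	var buf bytes.Buffer
+//	if err := cu.Encode(&buf); err != nil {
+//		// handle the error
+//	}
+//	var decoded CellUnion
+//	if err := decoded.Decode(&buf); err != nil {
+//		// handle the error
+//	}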
+func (cu *CellUnion) Encode(w io.Writer) error { + e := &encoder{w: w} + cu.encode(e) + return e.err +} + +func (cu *CellUnion) encode(e *encoder) { + e.writeInt8(encodingVersion) + e.writeInt64(int64(len(*cu))) + for _, ci := range *cu { + ci.encode(e) + } +} + +// Decode decodes the CellUnion. +func (cu *CellUnion) Decode(r io.Reader) error { + d := &decoder{r: asByteReader(r)} + cu.decode(d) + return d.err +} + +func (cu *CellUnion) decode(d *decoder) { + version := d.readInt8() + if d.err != nil { + return + } + if version != encodingVersion { + d.err = fmt.Errorf("only version %d is supported", encodingVersion) + return + } + n := d.readInt64() + if d.err != nil { + return + } + const maxCells = 1000000 + if n > maxCells { + d.err = fmt.Errorf("too many cells (%d; max is %d)", n, maxCells) + return + } + *cu = make([]CellID, n) + for i := range *cu { + (*cu)[i].decode(d) + } +} diff --git a/vendor/github.com/blevesearch/geo/s2/centroids.go b/vendor/github.com/blevesearch/geo/s2/centroids.go new file mode 100644 index 0000000..e8a91c4 --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/centroids.go @@ -0,0 +1,133 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/r3" +) + +// There are several notions of the "centroid" of a triangle. First, there +// is the planar centroid, which is simply the centroid of the ordinary +// (non-spherical) triangle defined by the three vertices. Second, there is +// the surface centroid, which is defined as the intersection of the three +// medians of the spherical triangle. It is possible to show that this +// point is simply the planar centroid projected to the surface of the +// sphere. Finally, there is the true centroid (mass centroid), which is +// defined as the surface integral over the spherical triangle of (x,y,z) +// divided by the triangle area. This is the point that the triangle would +// rotate around if it was spinning in empty space. +// +// The best centroid for most purposes is the true centroid. Unlike the +// planar and surface centroids, the true centroid behaves linearly as +// regions are added or subtracted. That is, if you split a triangle into +// pieces and compute the average of their centroids (weighted by triangle +// area), the result equals the centroid of the original triangle. This is +// not true of the other centroids. +// +// Also note that the surface centroid may be nowhere near the intuitive +// "center" of a spherical triangle. For example, consider the triangle +// with vertices A=(1,eps,0), B=(0,0,1), C=(-1,eps,0) (a quarter-sphere). +// The surface centroid of this triangle is at S=(0, 2*eps, 1), which is +// within a distance of 2*eps of the vertex B. Note that the median from A +// (the segment connecting A to the midpoint of BC) passes through S, since +// this is the shortest path connecting the two endpoints. 
On the other +// hand, the true centroid is at M=(0, 0.5, 0.5), which when projected onto +// the surface is a much more reasonable interpretation of the "center" of +// this triangle. +// + +// TrueCentroid returns the true centroid of the spherical triangle ABC +// multiplied by the signed area of spherical triangle ABC. The reasons for +// multiplying by the signed area are (1) this is the quantity that needs to be +// summed to compute the centroid of a union or difference of triangles, and +// (2) it's actually easier to calculate this way. All points must have unit length. +// +// Note that the result of this function is defined to be Point(0, 0, 0) if +// the triangle is degenerate. +func TrueCentroid(a, b, c Point) Point { + // Use Distance to get accurate results for small triangles. + ra := float64(1) + if sa := float64(b.Distance(c)); sa != 0 { + ra = sa / math.Sin(sa) + } + rb := float64(1) + if sb := float64(c.Distance(a)); sb != 0 { + rb = sb / math.Sin(sb) + } + rc := float64(1) + if sc := float64(a.Distance(b)); sc != 0 { + rc = sc / math.Sin(sc) + } + + // Now compute a point M such that: + // + // [Ax Ay Az] [Mx] [ra] + // [Bx By Bz] [My] = 0.5 * det(A,B,C) * [rb] + // [Cx Cy Cz] [Mz] [rc] + // + // To improve the numerical stability we subtract the first row (A) from the + // other two rows; this reduces the cancellation error when A, B, and C are + // very close together. Then we solve it using Cramer's rule. + // + // The result is the true centroid of the triangle multiplied by the + // triangle's area. + // + // This code still isn't as numerically stable as it could be. + // The biggest potential improvement is to compute B-A and C-A more + // accurately so that (B-A)x(C-A) is always inside triangle ABC. + x := r3.Vector{a.X, b.X - a.X, c.X - a.X} + y := r3.Vector{a.Y, b.Y - a.Y, c.Y - a.Y} + z := r3.Vector{a.Z, b.Z - a.Z, c.Z - a.Z} + r := r3.Vector{ra, rb - ra, rc - ra} + + return Point{r3.Vector{y.Cross(z).Dot(r), z.Cross(x).Dot(r), x.Cross(y).Dot(r)}.Mul(0.5)} +} + +// EdgeTrueCentroid returns the true centroid of the spherical geodesic edge AB +// multiplied by the length of the edge AB. As with triangles, the true centroid +// of a collection of line segments may be computed simply by summing the result +// of this method for each segment. +// +// Note that the planar centroid of a line segment is simply 0.5 * (a + b), +// while the surface centroid is (a + b).Normalize(). However neither of +// these values is appropriate for computing the centroid of a collection of +// edges (such as a polyline). +// +// Also note that the result of this function is defined to be Point(0, 0, 0) +// if the edge is degenerate. +func EdgeTrueCentroid(a, b Point) Point { + // The centroid (multiplied by length) is a vector toward the midpoint + // of the edge, whose length is twice the sine of half the angle between + // the two vertices. Defining theta to be this angle, we have: + vDiff := a.Sub(b.Vector) // Length == 2*sin(theta) + vSum := a.Add(b.Vector) // Length == 2*cos(theta) + sin2 := vDiff.Norm2() + cos2 := vSum.Norm2() + if cos2 == 0 { + return Point{} // Ignore antipodal edges. + } + return Point{vSum.Mul(math.Sqrt(sin2 / cos2))} // Length == 2*sin(theta) +} + +// PlanarCentroid returns the centroid of the planar triangle ABC. This can be +// normalized to unit length to obtain the "surface centroid" of the corresponding +// spherical triangle, i.e. the intersection of the three medians. 
However, note
+// that for large spherical triangles the surface centroid may be nowhere near
+// the intuitive "center".
+func PlanarCentroid(a, b, c Point) Point {
+	return Point{a.Add(b.Vector).Add(c.Vector).Mul(1. / 3)}
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/contains_point_query.go b/vendor/github.com/blevesearch/geo/s2/contains_point_query.go
new file mode 100644
index 0000000..3026f36
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/contains_point_query.go
@@ -0,0 +1,190 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// VertexModel defines whether shapes are considered to contain their vertices.
+// Note that these definitions differ from the ones used by BooleanOperation.
+//
+// Note that points other than vertices are never contained by polylines.
+// If you need this behavior, use ClosestEdgeQuery's IsDistanceLess
+// with a suitable distance threshold instead.
+type VertexModel int
+
+const (
+	// VertexModelOpen means no shapes contain their vertices (not even
+	// points). Therefore Contains(Point) returns true if and only if the
+	// point is in the interior of some polygon.
+	VertexModelOpen VertexModel = iota
+
+	// VertexModelSemiOpen means that polygon point containment is defined
+	// such that if several polygons tile the region around a vertex, then
+	// exactly one of those polygons contains that vertex. Points and
+	// polylines still do not contain any vertices.
+	VertexModelSemiOpen
+
+	// VertexModelClosed means all shapes contain their vertices (including
+	// points and polylines).
+	VertexModelClosed
+)
+
+// ContainsPointQuery determines whether one or more shapes in a ShapeIndex
+// contain a given Point. The ShapeIndex may contain any number of points,
+// polylines, and/or polygons (possibly overlapping). Shape boundaries may be
+// modeled as Open, SemiOpen, or Closed (this affects whether or not shapes are
+// considered to contain their vertices).
+//
+// This type is not safe for concurrent use.
+//
+// However, note that if you need to do a large number of point containment
+// tests, it is more efficient to re-use the query rather than creating a new
+// one each time.
+type ContainsPointQuery struct {
+	model VertexModel
+	index *ShapeIndex
+	iter  *ShapeIndexIterator
+}
+
+// NewContainsPointQuery creates a new instance of the ContainsPointQuery for the index
+// and given vertex model choice.
+func NewContainsPointQuery(index *ShapeIndex, model VertexModel) *ContainsPointQuery {
+	return &ContainsPointQuery{
+		index: index,
+		model: model,
+		iter:  index.Iterator(),
+	}
+}
+
+// Contains reports whether any shape in the query's index contains the point p
+// under the query's vertex model (Open, SemiOpen, or Closed).
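+//
+// A typical use (sketch; index is a previously built *ShapeIndex):
+//
+//	q := NewContainsPointQuery(index, VertexModelSemiOpen)
+//	if q.Contains(pt) {
+//		// pt is contained by at least one indexed shape.
+//	}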
+func (q *ContainsPointQuery) Contains(p Point) bool {
+	if !q.iter.LocatePoint(p) {
+		return false
+	}
+
+	cell := q.iter.IndexCell()
+	for _, clipped := range cell.shapes {
+		if q.shapeContains(clipped, q.iter.Center(), p) {
+			return true
+		}
+	}
+	return false
+}
+
+// shapeContains reports whether the clippedShape from the iterator's center position contains
+// the given point.
+func (q *ContainsPointQuery) shapeContains(clipped *clippedShape, center, p Point) bool {
+	inside := clipped.containsCenter
+	numEdges := clipped.numEdges()
+	if numEdges <= 0 {
+		return inside
+	}
+
+	shape := q.index.Shape(clipped.shapeID)
+	if shape.Dimension() != 2 {
+		// Points and polylines can be ignored unless the vertex model is Closed.
+		if q.model != VertexModelClosed {
+			return false
+		}
+
+		// Otherwise, the point is contained if and only if it matches a vertex.
+		for _, edgeID := range clipped.edges {
+			edge := shape.Edge(edgeID)
+			if edge.V0 == p || edge.V1 == p {
+				return true
+			}
+		}
+		return false
+	}
+
+	// Test containment by drawing a line segment from the cell center to the
+	// given point and counting edge crossings.
+	crosser := NewEdgeCrosser(center, p)
+	for _, edgeID := range clipped.edges {
+		edge := shape.Edge(edgeID)
+		sign := crosser.CrossingSign(edge.V0, edge.V1)
+		if sign == DoNotCross {
+			continue
+		}
+		if sign == MaybeCross {
+			// For the Open and Closed models, check whether p is a vertex.
+			if q.model != VertexModelSemiOpen && (edge.V0 == p || edge.V1 == p) {
+				return (q.model == VertexModelClosed)
+			}
+			// C++ plays fast and loose with the int <-> bool conversions here.
+			if VertexCrossing(crosser.a, crosser.b, edge.V0, edge.V1) {
+				sign = Cross
+			} else {
+				sign = DoNotCross
+			}
+		}
+		inside = inside != (sign == Cross)
+	}
+
+	return inside
+}
+
+// ShapeContains reports whether the given shape contains the point under this
+// query's vertex model (Open, SemiOpen, or Closed).
+//
+// This requires that the shape belongs to this query's index.
+func (q *ContainsPointQuery) ShapeContains(shape Shape, p Point) bool {
+	if !q.iter.LocatePoint(p) {
+		return false
+	}
+
+	clipped := q.iter.IndexCell().findByShapeID(q.index.idForShape(shape))
+	if clipped == nil {
+		return false
+	}
+	return q.shapeContains(clipped, q.iter.Center(), p)
+}
+
+// shapeVisitorFunc is a type of function that can be called against shapes in an index.
+type shapeVisitorFunc func(shape Shape) bool
+
+// visitContainingShapes visits all shapes in the given index that contain the
+// given point p, terminating early if the given visitor function returns false,
+// in which case visitContainingShapes returns false. Each shape is
+// visited at most once.
+func (q *ContainsPointQuery) visitContainingShapes(p Point, f shapeVisitorFunc) bool {
+	// This function returns false only if the algorithm terminates early
+	// because the visitor function returned false.
+	if !q.iter.LocatePoint(p) {
+		return true
+	}
+
+	cell := q.iter.IndexCell()
+	for _, clipped := range cell.shapes {
+		if q.shapeContains(clipped, q.iter.Center(), p) &&
+			!f(q.index.Shape(clipped.shapeID)) {
+			return false
+		}
+	}
+	return true
+}
+
+// ContainingShapes returns a slice of all shapes that contain the given point.
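+//
+// An illustrative sketch (index and p are assumed values):
+//
+//	query := NewContainsPointQuery(index, VertexModelClosed)
+//	for _, shape := range query.ContainingShapes(p) {
+//		_ = shape // inspect each shape that contains p
+//	}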
+func (q *ContainsPointQuery) ContainingShapes(p Point) []Shape { + var shapes []Shape + q.visitContainingShapes(p, func(shape Shape) bool { + shapes = append(shapes, shape) + return true + }) + return shapes +} + +// TODO(roberts): Remaining methods from C++ +// type edgeVisitorFunc func(shape ShapeEdge) bool +// func (q *ContainsPointQuery) visitIncidentEdges(p Point, v edgeVisitorFunc) bool diff --git a/vendor/github.com/blevesearch/geo/s2/contains_vertex_query.go b/vendor/github.com/blevesearch/geo/s2/contains_vertex_query.go new file mode 100644 index 0000000..8e74f9e --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/contains_vertex_query.go @@ -0,0 +1,63 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +// ContainsVertexQuery is used to track the edges entering and leaving the +// given vertex of a Polygon in order to be able to determine if the point is +// contained by the Polygon. +// +// Point containment is defined according to the semi-open boundary model +// which means that if several polygons tile the region around a vertex, +// then exactly one of those polygons contains that vertex. +type ContainsVertexQuery struct { + target Point + edgeMap map[Point]int +} + +// NewContainsVertexQuery returns a new query for the given vertex whose +// containment will be determined. +func NewContainsVertexQuery(target Point) *ContainsVertexQuery { + return &ContainsVertexQuery{ + target: target, + edgeMap: make(map[Point]int), + } +} + +// AddEdge adds the edge between target and v with the given direction. +// (+1 = outgoing, -1 = incoming, 0 = degenerate). +func (q *ContainsVertexQuery) AddEdge(v Point, direction int) { + q.edgeMap[v] += direction +} + +// ContainsVertex reports a +1 if the target vertex is contained, -1 if it is +// not contained, and 0 if the incident edges consisted of matched sibling pairs. +func (q *ContainsVertexQuery) ContainsVertex() int { + // Find the unmatched edge that is immediately clockwise from Ortho(P). + referenceDir := Point{q.target.Ortho()} + + bestPoint := referenceDir + bestDir := 0 + + for k, v := range q.edgeMap { + if v == 0 { + continue // This is a "matched" edge. + } + if OrderedCCW(referenceDir, bestPoint, k, q.target) { + bestPoint = k + bestDir = v + } + } + return bestDir +} diff --git a/vendor/github.com/blevesearch/geo/s2/convex_hull_query.go b/vendor/github.com/blevesearch/geo/s2/convex_hull_query.go new file mode 100644 index 0000000..68539ab --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/convex_hull_query.go @@ -0,0 +1,258 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"sort"
+
+	"github.com/golang/geo/r3"
+)
+
+// ConvexHullQuery builds the convex hull of any collection of points,
+// polylines, loops, and polygons. It returns a single convex loop.
+//
+// The convex hull is defined as the smallest convex region on the sphere that
+// contains all of your input geometry. Recall that a region is "convex" if
+// for every pair of points inside the region, the straight edge between them
+// is also inside the region. In our case, a "straight" edge is a geodesic,
+// i.e. the shortest path on the sphere between two points.
+//
+// Containment of input geometry is defined as follows:
+//
+//  - Each input loop and polygon is contained by the convex hull exactly
+//    (i.e., according to Polygon's Contains(Polygon)).
+//
+//  - Each input point is either contained by the convex hull or is a vertex
+//    of the convex hull. (Recall that S2Loops do not necessarily contain their
+//    vertices.)
+//
+//  - For each input polyline, the convex hull contains all of its vertices
+//    according to the rule for points above. (The definition of convexity
+//    then ensures that the convex hull also contains the polyline edges.)
+//
+// To use this type, call the various Add... methods to add your input geometry, and
+// then call ConvexHull. Note that ConvexHull does *not* reset the
+// state; you can continue adding geometry if desired and compute the convex
+// hull again. If you want to start from scratch, simply create a new
+// ConvexHullQuery value.
+//
+// This implements Andrew's monotone chain algorithm, which is a variant of the
+// Graham scan (see https://en.wikipedia.org/wiki/Graham_scan). The time
+// complexity is O(n log n), and the space required is O(n). In fact only the
+// call to "sort" takes O(n log n) time; the rest of the algorithm is linear.
+//
+// Demonstration of the algorithm and code:
+// en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain
+//
+// This type is not safe for concurrent use.
+type ConvexHullQuery struct {
+	bound  Rect
+	points []Point
+}
+
+// NewConvexHullQuery creates a new ConvexHullQuery.
+func NewConvexHullQuery() *ConvexHullQuery {
+	return &ConvexHullQuery{
+		bound: EmptyRect(),
+	}
+}
+
+// AddPoint adds the given point to the input geometry.
+func (q *ConvexHullQuery) AddPoint(p Point) {
+	q.bound = q.bound.AddPoint(LatLngFromPoint(p))
+	q.points = append(q.points, p)
+}
+
+// AddPolyline adds the given polyline to the input geometry.
+func (q *ConvexHullQuery) AddPolyline(p *Polyline) {
+	q.bound = q.bound.Union(p.RectBound())
+	q.points = append(q.points, (*p)...)
+}
+
+// AddLoop adds the given loop to the input geometry.
+func (q *ConvexHullQuery) AddLoop(l *Loop) {
+	q.bound = q.bound.Union(l.RectBound())
+	if l.isEmptyOrFull() {
+		return
+	}
+	q.points = append(q.points, l.vertices...)
+}
+
+// AddPolygon adds the given polygon to the input geometry.
+func (q *ConvexHullQuery) AddPolygon(p *Polygon) {
+	q.bound = q.bound.Union(p.RectBound())
+	for _, l := range p.loops {
+		// Only loops at depth 0 can contribute to the convex hull.
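+		// (Loops at depth > 0 are nested inside a depth-0 loop, so any
+		// region they bound is already enclosed by the hull of the
+		// depth-0 loops.)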
+ if l.depth == 0 { + q.AddLoop(l) + } + } +} + +// CapBound returns a bounding cap for the input geometry provided. +// +// Note that this method does not clear the geometry; you can continue +// adding to it and call this method again if desired. +func (q *ConvexHullQuery) CapBound() Cap { + // We keep track of a rectangular bound rather than a spherical cap because + // it is easy to compute a tight bound for a union of rectangles, whereas it + // is quite difficult to compute a tight bound around a union of caps. + // Also, polygons and polylines implement CapBound() in terms of + // RectBound() for this same reason, so it is much better to keep track + // of a rectangular bound as we go along and convert it at the end. + // + // TODO(roberts): We could compute an optimal bound by implementing Welzl's + // algorithm. However we would still need to have special handling of loops + // and polygons, since if a loop spans more than 180 degrees in any + // direction (i.e., if it contains two antipodal points), then it is not + // enough just to bound its vertices. In this case the only convex bounding + // cap is FullCap(), and the only convex bounding loop is the full loop. + return q.bound.CapBound() +} + +// ConvexHull returns a Loop representing the convex hull of the input geometry provided. +// +// If there is no geometry, this method returns an empty loop containing no +// points. +// +// If the geometry spans more than half of the sphere, this method returns a +// full loop containing the entire sphere. +// +// If the geometry contains 1 or 2 points, or a single edge, this method +// returns a very small loop consisting of three vertices (which are a +// superset of the input vertices). +// +// Note that this method does not clear the geometry; you can continue +// adding to the query and call this method again. +func (q *ConvexHullQuery) ConvexHull() *Loop { + c := q.CapBound() + if c.Height() >= 1 { + // The bounding cap is not convex. The current bounding cap + // implementation is not optimal, but nevertheless it is likely that the + // input geometry itself is not contained by any convex polygon. In any + // case, we need a convex bounding cap to proceed with the algorithm below + // (in order to construct a point "origin" that is definitely outside the + // convex hull). + return FullLoop() + } + + // Remove duplicates. We need to do this before checking whether there are + // fewer than 3 points. + x := make(map[Point]bool) + r, w := 0, 0 // read/write indexes + for ; r < len(q.points); r++ { + if x[q.points[r]] { + continue + } + q.points[w] = q.points[r] + x[q.points[r]] = true + w++ + } + q.points = q.points[:w] + + // This code implements Andrew's monotone chain algorithm, which is a simple + // variant of the Graham scan. Rather than sorting by x-coordinate, instead + // we sort the points in CCW order around an origin O such that all points + // are guaranteed to be on one side of some geodesic through O. This + // ensures that as we scan through the points, each new point can only + // belong at the end of the chain (i.e., the chain is monotone in terms of + // the angle around O from the starting point). + origin := Point{c.Center().Ortho()} + sort.Slice(q.points, func(i, j int) bool { + return RobustSign(origin, q.points[i], q.points[j]) == CounterClockwise + }) + + // Special cases for fewer than 3 points. 
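+	// (The deduplication above matters here: fewer than 3 distinct points
+	// cannot form a proper loop, so small synthetic loops are returned
+	// instead.)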
+	switch len(q.points) {
+	case 0:
+		return EmptyLoop()
+	case 1:
+		return singlePointLoop(q.points[0])
+	case 2:
+		return singleEdgeLoop(q.points[0], q.points[1])
+	}
+
+	// Generate the lower and upper halves of the convex hull. Each half
+	// consists of the maximal subset of vertices such that the edge chain
+	// makes only left (CCW) turns.
+	lower := q.monotoneChain()
+
+	// reverse the points
+	for left, right := 0, len(q.points)-1; left < right; left, right = left+1, right-1 {
+		q.points[left], q.points[right] = q.points[right], q.points[left]
+	}
+	upper := q.monotoneChain()
+
+	// Remove the duplicate vertices and combine the chains.
+	lower = lower[:len(lower)-1]
+	upper = upper[:len(upper)-1]
+	lower = append(lower, upper...)
+
+	return LoopFromPoints(lower)
+}
+
+// monotoneChain iterates through the points, selecting the maximal subset of points
+// such that the edge chain makes only left (CCW) turns.
+func (q *ConvexHullQuery) monotoneChain() []Point {
+	var output []Point
+	for _, p := range q.points {
+		// Remove any points that would cause the chain to make a clockwise turn.
+		for len(output) >= 2 && RobustSign(output[len(output)-2], output[len(output)-1], p) != CounterClockwise {
+			output = output[:len(output)-1]
+		}
+		output = append(output, p)
+	}
+	return output
+}
+
+// singlePointLoop constructs a 3-vertex polygon consisting of "p" and two nearby
+// vertices. Note that ContainsPoint(p) may be false for the resulting loop.
+func singlePointLoop(p Point) *Loop {
+	const offset = 1e-15
+	d0 := p.Ortho()
+	d1 := p.Cross(d0)
+	vertices := []Point{
+		p,
+		{p.Add(d0.Mul(offset)).Normalize()},
+		{p.Add(d1.Mul(offset)).Normalize()},
+	}
+	return LoopFromPoints(vertices)
+}
+
+// singleEdgeLoop constructs a loop consisting of the two vertices and their midpoint.
+func singleEdgeLoop(a, b Point) *Loop {
+	// If the points are exactly antipodal we return the full loop.
+	//
+	// Note that we could use the code below even in this case (which would
+	// return a zero-area loop that follows the edge AB), except that (1) the
+	// direction of AB is defined using symbolic perturbations and therefore is
+	// not predictable by ordinary users, and (2) Loop disallows antipodal
+	// adjacent vertices and so we would need to use 4 vertices to define the
+	// degenerate loop. (Note that the Loop antipodal vertex restriction is
+	// historical and now could easily be removed, however it would still have
+	// the problem that the edge direction is not easily predictable.)
+	if a.Add(b.Vector) == (r3.Vector{}) {
+		return FullLoop()
+	}
+
+	// Construct a loop consisting of the two vertices and their midpoint. We
+	// use Interpolate() to ensure that the midpoint is very close to
+	// the edge even when its endpoints are nearly antipodal.
+	vertices := []Point{a, b, Interpolate(0.5, a, b)}
+	loop := LoopFromPoints(vertices)
+	// The resulting loop may be clockwise, so invert it if necessary.
+	loop.Normalize()
+	return loop
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/crossing_edge_query.go b/vendor/github.com/blevesearch/geo/s2/crossing_edge_query.go
new file mode 100644
index 0000000..51852da
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/crossing_edge_query.go
@@ -0,0 +1,409 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"sort"
+
+	"github.com/golang/geo/r2"
+)
+
+// CrossingEdgeQuery is used to find the Edge IDs of Shapes that are crossed by
+// one or more given edges.
+//
+// Note that if you need to query many edges, it is more efficient to declare
+// a single CrossingEdgeQuery instance and reuse it.
+//
+// If you want to find *all* the pairs of crossing edges, it is more efficient to
+// use the not yet implemented VisitCrossings in shapeutil.
+type CrossingEdgeQuery struct {
+	index *ShapeIndex
+
+	// temporary values used while processing a query.
+	a, b r2.Point
+	iter *ShapeIndexIterator
+
+	// candidate cells generated when finding crossings.
+	cells []*ShapeIndexCell
+}
+
+// NewCrossingEdgeQuery creates a CrossingEdgeQuery for the given index.
+func NewCrossingEdgeQuery(index *ShapeIndex) *CrossingEdgeQuery {
+	c := &CrossingEdgeQuery{
+		index: index,
+		iter:  index.Iterator(),
+	}
+	return c
+}
+
+// Crossings returns the set of edges of the shape S that intersect the given edge AB.
+// If crossType is CrossingTypeInterior, then only intersections at a point interior to both
+// edges are reported, while if it is CrossingTypeAll then edges that share a vertex
+// are also reported.
+func (c *CrossingEdgeQuery) Crossings(a, b Point, shape Shape, crossType CrossingType) []int {
+	edges := c.candidates(a, b, shape)
+	if len(edges) == 0 {
+		return nil
+	}
+
+	crosser := NewEdgeCrosser(a, b)
+	out := 0
+	n := len(edges)
+
+	for in := 0; in < n; in++ {
+		b := shape.Edge(edges[in])
+		sign := crosser.CrossingSign(b.V0, b.V1)
+		if crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross) || crossType != CrossingTypeAll && sign == Cross {
+			edges[out] = edges[in]
+			out++
+		}
+	}
+
+	if out < n {
+		edges = edges[0:out]
+	}
+	return edges
+}
+
+// EdgeMap stores a sorted set of edge ids for each shape.
+type EdgeMap map[Shape][]int
+
+// CrossingsEdgeMap returns the set of all edges in the index that intersect the given
+// edge AB. If crossType is CrossingTypeInterior, then only intersections at a
+// point interior to both edges are reported, while if it is CrossingTypeAll
+// then edges that share a vertex are also reported.
+//
+// The edges are returned as a mapping from shape to the edges of that shape
+// that intersect AB. Every returned shape has at least one crossing edge.
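+//
+// A usage sketch (index, a, and b are assumed values):
+//
+//	q := NewCrossingEdgeQuery(index)
+//	for shape, edgeIDs := range q.CrossingsEdgeMap(a, b, CrossingTypeInterior) {
+//		for _, id := range edgeIDs {
+//			_ = shape.Edge(id) // an edge crossing AB at an interior point
+//		}
+//	}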
+func (c *CrossingEdgeQuery) CrossingsEdgeMap(a, b Point, crossType CrossingType) EdgeMap {
+	edgeMap := c.candidatesEdgeMap(a, b)
+	if len(edgeMap) == 0 {
+		return nil
+	}
+
+	crosser := NewEdgeCrosser(a, b)
+	for shape, edges := range edgeMap {
+		out := 0
+		n := len(edges)
+		for in := 0; in < n; in++ {
+			edge := shape.Edge(edges[in])
+			sign := crosser.CrossingSign(edge.V0, edge.V1)
+			if (crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross)) || (crossType != CrossingTypeAll && sign == Cross) {
+				edgeMap[shape][out] = edges[in]
+				out++
+			}
+		}
+
+		if out == 0 {
+			delete(edgeMap, shape)
+		} else {
+			if out < n {
+				edgeMap[shape] = edgeMap[shape][0:out]
+			}
+		}
+	}
+	return edgeMap
+}
+
+// candidates returns a superset of the edges of the given shape that intersect
+// the edge AB.
+func (c *CrossingEdgeQuery) candidates(a, b Point, shape Shape) []int {
+	var edges []int
+
+	// For small loops it is faster to use brute force. The threshold below was
+	// determined using benchmarks.
+	const maxBruteForceEdges = 27
+	maxEdges := shape.NumEdges()
+	if maxEdges <= maxBruteForceEdges {
+		edges = make([]int, maxEdges)
+		for i := 0; i < maxEdges; i++ {
+			edges[i] = i
+		}
+		return edges
+	}
+
+	// Compute the set of index cells intersected by the query edge.
+	c.getCellsForEdge(a, b)
+	if len(c.cells) == 0 {
+		return nil
+	}
+
+	// Gather all the edges that intersect those cells and sort them.
+	// TODO(roberts): Shapes don't track their ID, so we need to range over
+	// the index to find the ID manually.
+	var shapeID int32
+	for k, v := range c.index.shapes {
+		if v == shape {
+			shapeID = k
+		}
+	}
+
+	for _, cell := range c.cells {
+		if cell == nil {
+			continue
+		}
+		clipped := cell.findByShapeID(shapeID)
+		if clipped == nil {
+			continue
+		}
+		edges = append(edges, clipped.edges...)
+	}
+
+	if len(c.cells) > 1 {
+		edges = uniqueInts(edges)
+	}
+
+	return edges
+}
+
+// uniqueInts returns the sorted unique values from the given input.
+func uniqueInts(in []int) []int {
+	var edges []int
+	m := make(map[int]bool)
+	for _, i := range in {
+		if m[i] {
+			continue
+		}
+		m[i] = true
+		edges = append(edges, i)
+	}
+	sort.Ints(edges)
+	return edges
+}
+
+// candidatesEdgeMap returns a map from shapes to the superset of edges for that
+// shape that intersect the edge AB.
+//
+// CAVEAT: This method may return shapes that have an empty set of candidate edges.
+// However the return value is non-empty only if at least one shape has a candidate edge.
+func (c *CrossingEdgeQuery) candidatesEdgeMap(a, b Point) EdgeMap {
+	edgeMap := make(EdgeMap)
+
+	// If there are only a few edges then it's faster to use brute force. We
+	// only bother with this optimization when there is a single shape.
+	if len(c.index.shapes) == 1 {
+		// Typically this method is called many times, so it is worth checking
+		// whether the edge map is empty or already consists of a single entry for
+		// this shape, and skip clearing edge map in that case.
+		shape := c.index.Shape(0)
+
+		// Note that we leave the edge map non-empty even if there are no candidates
+		// (i.e., there is a single entry with an empty set of edges).
+		edgeMap[shape] = c.candidates(a, b, shape)
+		return edgeMap
+	}
+
+	// Compute the set of index cells intersected by the query edge.
+	c.getCellsForEdge(a, b)
+	if len(c.cells) == 0 {
+		return edgeMap
+	}
+
+	// Gather all the edges that intersect those cells and sort them.
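+	// Each clipped shape stored in an index cell records which of its edges
+	// intersect that cell, so walking the candidate cells yields a candidate
+	// edge set per shape.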
+ for _, cell := range c.cells { + for _, clipped := range cell.shapes { + s := c.index.Shape(clipped.shapeID) + for j := 0; j < clipped.numEdges(); j++ { + edgeMap[s] = append(edgeMap[s], clipped.edges[j]) + } + } + } + + if len(c.cells) > 1 { + for s, edges := range edgeMap { + edgeMap[s] = uniqueInts(edges) + } + } + + return edgeMap +} + +// getCells returns the set of ShapeIndexCells that might contain edges intersecting +// the edge AB in the given cell root. This method is used primarily by loop and shapeutil. +func (c *CrossingEdgeQuery) getCells(a, b Point, root *PaddedCell) []*ShapeIndexCell { + aUV, bUV, ok := ClipToFace(a, b, root.id.Face()) + if ok { + c.a = aUV + c.b = bUV + edgeBound := r2.RectFromPoints(c.a, c.b) + if root.Bound().Intersects(edgeBound) { + c.computeCellsIntersected(root, edgeBound) + } + } + + if len(c.cells) == 0 { + return nil + } + + return c.cells +} + +// getCellsForEdge populates the cells field to the set of index cells intersected by an edge AB. +func (c *CrossingEdgeQuery) getCellsForEdge(a, b Point) { + c.cells = nil + + segments := FaceSegments(a, b) + for _, segment := range segments { + c.a = segment.a + c.b = segment.b + + // Optimization: rather than always starting the recursive subdivision at + // the top level face cell, instead we start at the smallest S2CellId that + // contains the edge (the edge root cell). This typically lets us skip + // quite a few levels of recursion since most edges are short. + edgeBound := r2.RectFromPoints(c.a, c.b) + pcell := PaddedCellFromCellID(CellIDFromFace(segment.face), 0) + edgeRoot := pcell.ShrinkToFit(edgeBound) + + // Now we need to determine how the edge root cell is related to the cells + // in the spatial index (cellMap). There are three cases: + // + // 1. edgeRoot is an index cell or is contained within an index cell. + // In this case we only need to look at the contents of that cell. + // 2. edgeRoot is subdivided into one or more index cells. In this case + // we recursively subdivide to find the cells intersected by AB. + // 3. edgeRoot does not intersect any index cells. In this case there + // is nothing to do. + relation := c.iter.LocateCellID(edgeRoot) + if relation == Indexed { + // edgeRoot is an index cell or is contained by an index cell (case 1). + c.cells = append(c.cells, c.iter.IndexCell()) + } else if relation == Subdivided { + // edgeRoot is subdivided into one or more index cells (case 2). We + // find the cells intersected by AB using recursive subdivision. + if !edgeRoot.isFace() { + pcell = PaddedCellFromCellID(edgeRoot, 0) + } + c.computeCellsIntersected(pcell, edgeBound) + } + } +} + +// computeCellsIntersected computes the index cells intersected by the current +// edge that are descendants of pcell and adds them to this queries set of cells. +func (c *CrossingEdgeQuery) computeCellsIntersected(pcell *PaddedCell, edgeBound r2.Rect) { + + c.iter.seek(pcell.id.RangeMin()) + if c.iter.Done() || c.iter.CellID() > pcell.id.RangeMax() { + // The index does not contain pcell or any of its descendants. + return + } + if c.iter.CellID() == pcell.id { + // The index contains this cell exactly. + c.cells = append(c.cells, c.iter.IndexCell()) + return + } + + // Otherwise, split the edge among the four children of pcell. + center := pcell.Middle().Lo() + + if edgeBound.X.Hi < center.X { + // Edge is entirely contained in the two left children. 
+		c.clipVAxis(edgeBound, center.Y, 0, pcell)
+		return
+	} else if edgeBound.X.Lo >= center.X {
+		// Edge is entirely contained in the two right children.
+		c.clipVAxis(edgeBound, center.Y, 1, pcell)
+		return
+	}
+
+	childBounds := c.splitUBound(edgeBound, center.X)
+	if edgeBound.Y.Hi < center.Y {
+		// Edge is entirely contained in the two lower children.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 0), childBounds[0])
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 0), childBounds[1])
+	} else if edgeBound.Y.Lo >= center.Y {
+		// Edge is entirely contained in the two upper children.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 1), childBounds[0])
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 1), childBounds[1])
+	} else {
+		// The edge bound spans all four children. The edge itself intersects
+		// at most three children (since no padding is being used).
+		c.clipVAxis(childBounds[0], center.Y, 0, pcell)
+		c.clipVAxis(childBounds[1], center.Y, 1, pcell)
+	}
+}
+
+// clipVAxis computes the intersected cells recursively for a given padded cell.
+// Given either the left (i=0) or right (i=1) side of a padded cell pcell,
+// determine whether the current edge intersects the lower child, upper child,
+// or both children, and call c.computeCellsIntersected recursively on those children.
+// The center is the v-coordinate at the center of pcell.
+func (c *CrossingEdgeQuery) clipVAxis(edgeBound r2.Rect, center float64, i int, pcell *PaddedCell) {
+	if edgeBound.Y.Hi < center {
+		// Edge is entirely contained in the lower child.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), edgeBound)
+	} else if edgeBound.Y.Lo >= center {
+		// Edge is entirely contained in the upper child.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), edgeBound)
+	} else {
+		// The edge intersects both children.
+		childBounds := c.splitVBound(edgeBound, center)
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), childBounds[0])
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), childBounds[1])
+	}
+}
+
+// splitUBound returns the bound for two children as a result of splitting the
+// current edge at the given value U.
+func (c *CrossingEdgeQuery) splitUBound(edgeBound r2.Rect, u float64) [2]r2.Rect {
+	v := edgeBound.Y.ClampPoint(interpolateFloat64(u, c.a.X, c.b.X, c.a.Y, c.b.Y))
+	// diag indicates which diagonal of the bounding box is spanned by AB:
+	// it is 0 if AB has positive slope, and 1 if AB has negative slope.
+	var diag int
+	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
+		diag = 1
+	}
+	return splitBound(edgeBound, 0, diag, u, v)
+}
+
+// splitVBound returns the bound for two children as a result of splitting the
+// current edge into two child edges at the given value V.
+func (c *CrossingEdgeQuery) splitVBound(edgeBound r2.Rect, v float64) [2]r2.Rect {
+	u := edgeBound.X.ClampPoint(interpolateFloat64(v, c.a.Y, c.b.Y, c.a.X, c.b.X))
+	var diag int
+	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
+		diag = 1
+	}
+	return splitBound(edgeBound, diag, 0, u, v)
+}
+
+// splitBound returns the bounds for the two children as a result of splitting
+// the current edge into two child edges at the given point (u,v). uEnd and vEnd
+// indicate which bound endpoints of the first child will be updated.
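+// For example, with uEnd == 0 and vEnd == 0, the first child keeps the low
+// endpoints of both intervals and has X.Hi and Y.Hi clipped to u and v, while
+// the second child has X.Lo and Y.Lo moved up to u and v.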
+func splitBound(edgeBound r2.Rect, uEnd, vEnd int, u, v float64) [2]r2.Rect { + var childBounds = [2]r2.Rect{ + edgeBound, + edgeBound, + } + + if uEnd == 1 { + childBounds[0].X.Lo = u + childBounds[1].X.Hi = u + } else { + childBounds[0].X.Hi = u + childBounds[1].X.Lo = u + } + + if vEnd == 1 { + childBounds[0].Y.Lo = v + childBounds[1].Y.Hi = v + } else { + childBounds[0].Y.Hi = v + childBounds[1].Y.Lo = v + } + + return childBounds +} diff --git a/vendor/github.com/blevesearch/geo/s2/distance_target.go b/vendor/github.com/blevesearch/geo/s2/distance_target.go new file mode 100644 index 0000000..066bbac --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/distance_target.go @@ -0,0 +1,149 @@ +// Copyright 2019 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "github.com/golang/geo/s1" +) + +// The distance interface represents a set of common methods used by algorithms +// that compute distances between various S2 types. +type distance interface { + // chordAngle returns this type as a ChordAngle. + chordAngle() s1.ChordAngle + + // fromChordAngle is used to type convert a ChordAngle to this type. + // This is to work around needing to be clever in parts of the code + // where a distanceTarget interface method expects distances, but the + // user only supplies a ChordAngle, and we need to dynamically cast it + // to an appropriate distance interface types. + fromChordAngle(o s1.ChordAngle) distance + + // zero returns a zero distance. + zero() distance + // negative returns a value smaller than any valid value. + negative() distance + // infinity returns a value larger than any valid value. + infinity() distance + + // less is similar to the Less method in Sort. To get minimum values, + // this would be a less than type operation. For maximum, this would + // be a greater than type operation. + less(other distance) bool + + // sub subtracts the other value from this one and returns the new value. + // This is done as a method and not simple mathematical operation to + // allow closest and furthest to implement this in opposite ways. + sub(other distance) distance + + // chordAngleBound reports the upper bound on a ChordAngle corresponding + // to this distance. For example, if distance measures WGS84 ellipsoid + // distance then the corresponding angle needs to be 0.56% larger. + chordAngleBound() s1.ChordAngle + + // updateDistance may update the value this distance represents + // based on the given input. The updated value and a boolean reporting + // if the value was changed are returned. + updateDistance(other distance) (distance, bool) +} + +// distanceTarget is an interface that represents a geometric type to which distances +// are measured. +// +// For example, there are implementations that measure distances to a Point, +// an Edge, a Cell, a CellUnion, and even to an arbitrary collection of geometry +// stored in ShapeIndex. 
+//
+// The distanceTarget types are provided for the benefit of types that measure
+// distances and/or find nearby geometry, such as ClosestEdgeQuery, FurthestEdgeQuery,
+// ClosestPointQuery, and ClosestCellQuery, etc.
+type distanceTarget interface {
+	// capBound returns a Cap that bounds the set of points whose distance to the
+	// target is distance.zero().
+	capBound() Cap
+
+	// updateDistanceToPoint updates the distance if the distance to
+	// the point P is less than the given dist.
+	// The boolean reports if the value was updated.
+	updateDistanceToPoint(p Point, dist distance) (distance, bool)
+
+	// updateDistanceToEdge updates the distance if the distance to
+	// the edge E is less than the given dist.
+	// The boolean reports if the value was updated.
+	updateDistanceToEdge(e Edge, dist distance) (distance, bool)
+
+	// updateDistanceToCell updates the distance if the distance to the cell C
+	// (including its interior) is less than the given dist.
+	// The boolean reports if the value was updated.
+	updateDistanceToCell(c Cell, dist distance) (distance, bool)
+
+	// setMaxError potentially updates the value of MaxError, and reports if
+	// the specific type supports altering it. Whenever one of the
+	// updateDistanceTo... methods above returns true, the returned distance
+	// is allowed to be up to maxError larger than the true minimum distance.
+	// In other words, it gives this target object permission to terminate its
+	// distance calculation as soon as it has determined that (1) the minimum
+	// distance is less than minDist and (2) the best possible further
+	// improvement is less than maxError.
+	//
+	// If the target takes advantage of maxError to optimize its distance
+	// calculation, this method must return true. (Most target types will
+	// default to return false.)
+	setMaxError(maxErr s1.ChordAngle) bool
+
+	// maxBruteForceIndexSize reports the maximum number of indexed objects for
+	// which it is faster to compute the distance by brute force (e.g., by testing
+	// every edge) rather than by using an index.
+	//
+	// The following method is provided as a convenience for types that compute
+	// distances to a collection of indexed geometry, such as ClosestEdgeQuery
+	// and ClosestPointQuery.
+	//
+	// Types that do not support this should return a -1.
+	maxBruteForceIndexSize() int
+
+	// distance returns an instance of the underlying distance type this
+	// target uses. This is to work around the use of Templates in the C++.
+	distance() distance
+
+	// visitContainingShapes finds all polygons in the given index that
+	// completely contain a connected component of the target geometry. (For
+	// example, if the target consists of 10 points, this method finds
+	// polygons that contain any of those 10 points.) For each such polygon,
+	// the visit function is called with the Shape of the polygon along with
+	// a point of the target geometry that is contained by that polygon.
+	//
+	// Optionally, any polygon that intersects the target geometry may also be
+	// returned. In other words, this method returns all polygons that
+	// contain any connected component of the target, along with an arbitrary
+	// subset of the polygons that intersect the target.
+	//
+	// For example, suppose that the index contains two abutting polygons
+	// A and B. If the target consists of two points "a" contained by A and
+	// "b" contained by B, then both A and B are returned.
But if the target + // consists of the edge "ab", then any subset of {A, B} could be returned + // (because both polygons intersect the target but neither one contains + // the edge "ab"). + // + // If the visit function returns false, this method terminates early and + // returns false as well. Otherwise returns true. + // + // NOTE(roberts): This method exists only for the purpose of implementing + // edgeQuery IncludeInteriors efficiently. + visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool +} + +// shapePointVisitorFunc defines a type of function the visitContainingShapes can call. +type shapePointVisitorFunc func(containingShape Shape, targetPoint Point) bool diff --git a/vendor/github.com/blevesearch/geo/s2/doc.go b/vendor/github.com/blevesearch/geo/s2/doc.go new file mode 100644 index 0000000..43e7a63 --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/doc.go @@ -0,0 +1,29 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package s2 is a library for working with geometry in S² (spherical geometry). + +Its related packages, parallel to this one, are s1 (operates on S¹), r1 (operates on ℝ¹), +r2 (operates on ℝ²) and r3 (operates on ℝ³). + +This package provides types and functions for the S2 cell hierarchy and coordinate systems. +The S2 cell hierarchy is a hierarchical decomposition of the surface of a unit sphere (S²) +into ``cells''; it is highly efficient, scales from continental size to under 1 cm² +and preserves spatial locality (nearby cells have close IDs). + +More information including an in-depth introduction to S2 can be found on the +S2 website https://s2geometry.io/ +*/ +package s2 diff --git a/vendor/github.com/blevesearch/geo/s2/edge_clipping.go b/vendor/github.com/blevesearch/geo/s2/edge_clipping.go new file mode 100644 index 0000000..57a53bf --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/edge_clipping.go @@ -0,0 +1,672 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +// This file contains a collection of methods for: +// +// (1) Robustly clipping geodesic edges to the faces of the S2 biunit cube +// (see s2stuv), and +// +// (2) Robustly clipping 2D edges against 2D rectangles. +// +// These functions can be used to efficiently find the set of CellIDs that +// are intersected by a geodesic edge (e.g., see CrossingEdgeQuery). 
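+//
+// For instance, a caller might clip an edge first to a cube face and then to
+// a (u,v) rectangle on that face (the points and rectangle here are assumed
+// values):
+//
+//	if aUV, bUV, ok := ClipToFace(a, b, face); ok {
+//		aClip, bClip, ok := ClipEdge(aUV, bUV, clipRect)
+//		_, _, _ = aClip, bClip, ok
+//	}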
+
+import (
+	"math"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r2"
+	"github.com/golang/geo/r3"
+)
+
+const (
+	// edgeClipErrorUVCoord is the maximum error in a u- or v-coordinate
+	// compared to the exact result, assuming that the points A and B are in
+	// the rectangle [-1,1]x[-1,1] or slightly outside it (by 1e-10 or less).
+	edgeClipErrorUVCoord = 2.25 * dblEpsilon
+
+	// edgeClipErrorUVDist is the maximum distance from a clipped point to
+	// the corresponding exact result. It is equal to the error in a single
+	// coordinate because at most one coordinate is subject to error.
+	edgeClipErrorUVDist = 2.25 * dblEpsilon
+
+	// faceClipErrorRadians is the maximum angle between a returned vertex
+	// and the nearest point on the exact edge AB. It is equal to the
+	// maximum directional error in PointCross, plus the error when
+	// projecting points onto a cube face.
+	faceClipErrorRadians = 3 * dblEpsilon
+
+	// faceClipErrorUVDist is the same angle expressed as a maximum distance
+	// in (u,v)-space. In other words, a returned vertex is at most this far
+	// from the exact edge AB projected into (u,v)-space.
+	faceClipErrorUVDist = 9 * dblEpsilon
+
+	// faceClipErrorUVCoord is the maximum angle between a returned vertex
+	// and the nearest point on the exact edge AB expressed as the maximum error
+	// in an individual u- or v-coordinate. In other words, for each
+	// returned vertex there is a point on the exact edge AB whose u- and
+	// v-coordinates differ from the vertex by at most this amount.
+	faceClipErrorUVCoord = 9.0 * (1.0 / math.Sqrt2) * dblEpsilon
+
+	// intersectsRectErrorUVDist is the maximum error when computing if a point
+	// intersects with a given Rect. If some point of AB is inside the
+	// rectangle by at least this distance, the result is guaranteed to be true;
+	// if all points of AB are outside the rectangle by at least this distance,
+	// the result is guaranteed to be false. This bound assumes that rect is
+	// a subset of the rectangle [-1,1]x[-1,1] or extends slightly outside it
+	// (e.g., by 1e-10 or less).
+	intersectsRectErrorUVDist = 3 * math.Sqrt2 * dblEpsilon
+)
+
+// ClipToFace returns the (u,v) coordinates for the portion of the edge AB that
+// intersects the given face, or false if the edge AB does not intersect.
+// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1]
+// cube face rectangle and are within faceClipErrorUVDist of the line AB, but
+// the results may differ from those produced by FaceSegments.
+func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) {
+	return ClipToPaddedFace(a, b, face, 0.0)
+}
+
+// ClipToPaddedFace returns the (u,v) coordinates for the portion of the edge AB that
+// intersects the given face, but rather than clipping to the square [-1,1]x[-1,1]
+// in (u,v) space, this method clips to [-R,R]x[-R,R] where R=(1+padding).
+// Padding must be non-negative.
+func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, intersects bool) {
+	// Fast path: both endpoints are on the given face.
+	if face(a.Vector) == f && face(b.Vector) == f {
+		au, av := validFaceXYZToUV(f, a.Vector)
+		bu, bv := validFaceXYZToUV(f, b.Vector)
+		return r2.Point{au, av}, r2.Point{bu, bv}, true
+	}
+
+	// Convert everything into the (u,v,w) coordinates of the given face.
Note + // that the cross product *must* be computed in the original (x,y,z) + // coordinate system because PointCross (unlike the mathematical cross + // product) can produce different results in different coordinate systems + // when one argument is a linear multiple of the other, due to the use of + // symbolic perturbations. + normUVW := pointUVW(faceXYZtoUVW(f, a.PointCross(b))) + aUVW := pointUVW(faceXYZtoUVW(f, a)) + bUVW := pointUVW(faceXYZtoUVW(f, b)) + + // Padding is handled by scaling the u- and v-components of the normal. + // Letting R=1+padding, this means that when we compute the dot product of + // the normal with a cube face vertex (such as (-1,-1,1)), we will actually + // compute the dot product with the scaled vertex (-R,-R,1). This allows + // methods such as intersectsFace, exitAxis, etc, to handle padding + // with no further modifications. + scaleUV := 1 + padding + scaledN := pointUVW{r3.Vector{X: scaleUV * normUVW.X, Y: scaleUV * normUVW.Y, Z: normUVW.Z}} + if !scaledN.intersectsFace() { + return aUV, bUV, false + } + + // TODO(roberts): This is a workaround for extremely small vectors where some + // loss of precision can occur in Normalize causing underflow. When PointCross + // is updated to work around this, this can be removed. + if math.Max(math.Abs(normUVW.X), math.Max(math.Abs(normUVW.Y), math.Abs(normUVW.Z))) < math.Ldexp(1, -511) { + normUVW = pointUVW{normUVW.Mul(math.Ldexp(1, 563))} + } + + normUVW = pointUVW{normUVW.Normalize()} + + aTan := pointUVW{normUVW.Cross(aUVW.Vector)} + bTan := pointUVW{bUVW.Cross(normUVW.Vector)} + + // As described in clipDestination, if the sum of the scores from clipping the two + // endpoints is 3 or more, then the segment does not intersect this face. + aUV, aScore := clipDestination(bUVW, aUVW, pointUVW{scaledN.Mul(-1)}, bTan, aTan, scaleUV) + bUV, bScore := clipDestination(aUVW, bUVW, scaledN, aTan, bTan, scaleUV) + + return aUV, bUV, aScore+bScore < 3 +} + +// ClipEdge returns the portion of the edge defined by AB that is contained by the +// given rectangle. If there is no intersection, false is returned and aClip and bClip +// are undefined. +func ClipEdge(a, b r2.Point, clip r2.Rect) (aClip, bClip r2.Point, intersects bool) { + // Compute the bounding rectangle of AB, clip it, and then extract the new + // endpoints from the clipped bound. + bound := r2.RectFromPoints(a, b) + if bound, intersects = clipEdgeBound(a, b, clip, bound); !intersects { + return aClip, bClip, false + } + ai := 0 + if a.X > b.X { + ai = 1 + } + aj := 0 + if a.Y > b.Y { + aj = 1 + } + + return bound.VertexIJ(ai, aj), bound.VertexIJ(1-ai, 1-aj), true +} + +// The three functions below (sumEqual, intersectsFace, intersectsOppositeEdges) +// all compare a sum (u + v) to a third value w. They are implemented in such a +// way that they produce an exact result even though all calculations are done +// with ordinary floating-point operations. Here are the principles on which these +// functions are based: +// +// A. If u + v < w in floating-point, then u + v < w in exact arithmetic. +// +// B. If u + v < w in exact arithmetic, then at least one of the following +// expressions is true in floating-point: +// u + v < w +// u < w - v +// v < w - u +// +// Proof: By rearranging terms and substituting ">" for "<", we can assume +// that all values are non-negative. Now clearly "w" is not the smallest +// value, so assume WLOG that "u" is the smallest. We want to show that +// u < w - v in floating-point. 
If v >= w/2, the calculation of w - v is
+// exact since the result is smaller in magnitude than either input value,
+// so the result holds. Otherwise we have u <= v < w/2 and w - v >= w/2
+// (even in floating point), so the result also holds.
+
+// sumEqual reports whether u + v == w exactly.
+func sumEqual(u, v, w float64) bool {
+	return (u+v == w) && (u == w-v) && (v == w-u)
+}
+
+// pointUVW represents a Point in (u,v,w) coordinate space of a cube face.
+type pointUVW Point
+
+// intersectsFace reports whether a given directed line L intersects the cube face F.
+// The line L is defined by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsFace() bool {
+	// L intersects the [-1,1]x[-1,1] square in (u,v) if and only if the dot
+	// products of N with the four corner vertices (-1,-1,1), (1,-1,1), (1,1,1),
+	// and (-1,1,1) do not all have the same sign. This is true exactly when
+	// |Nu| + |Nv| >= |Nw|. The code below evaluates this expression exactly.
+	u := math.Abs(p.X)
+	v := math.Abs(p.Y)
+	w := math.Abs(p.Z)
+
+	// We only need to consider the cases where u or v is the smallest value,
+	// since if w is the smallest then both expressions below will have a
+	// positive LHS and a negative RHS.
+	return (v >= w-u) && (u >= w-v)
+}
+
+// intersectsOppositeEdges reports whether a directed line L intersects two
+// opposite edges of a cube face F. This includes the case where L passes
+// exactly through a corner vertex of F. The directed line L is defined
+// by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsOppositeEdges() bool {
+	// The line L intersects opposite edges of the [-1,1]x[-1,1] (u,v) square if
+	// and only if exactly two of the corner vertices lie on each side of L. This
+	// is true exactly when ||Nu| - |Nv|| >= |Nw|. The code below evaluates this
+	// expression exactly.
+	u := math.Abs(p.X)
+	v := math.Abs(p.Y)
+	w := math.Abs(p.Z)
+
+	// If w is the smallest, the following line returns an exact result.
+	if math.Abs(u-v) != w {
+		return math.Abs(u-v) >= w
+	}
+
+	// Otherwise u - v = w exactly, or w is not the smallest value. In either
+	// case the following returns the correct result.
+	if u >= v {
+		return u-w >= v
+	}
+	return v-w >= u
+}
+
+// axis represents the possible results of exitAxis.
+type axis int
+
+const (
+	axisU axis = iota
+	axisV
+)
+
+// exitAxis reports which axis the directed line L exits the cube face F on.
+// The directed line L is represented by its CCW normal N in the (u,v,w) coordinates
+// of F. It returns axisU if L exits through the u=-1 or u=+1 edge, and axisV if L exits
+// through the v=-1 or v=+1 edge. Either result is acceptable if L exits exactly
+// through a corner vertex of the cube face.
+func (p pointUVW) exitAxis() axis {
+	if p.intersectsOppositeEdges() {
+		// The line passes through opposite edges of the face.
+		// It exits through the v=+1 or v=-1 edge if the u-component of N has a
+		// larger absolute magnitude than the v-component.
+		if math.Abs(p.X) >= math.Abs(p.Y) {
+			return axisV
+		}
+		return axisU
+	}
+
+	// The line passes through two adjacent edges of the face.
+	// It exits the v=+1 or v=-1 edge if an even number of the components of N
+	// are negative. We test this using signbit() rather than multiplication
+	// to avoid the possibility of underflow.
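+	// Set x, y, z to 1 for each negative component of N; x^y^z below is then
+	// the parity of the number of negative components.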
+ var x, y, z int + if math.Signbit(p.X) { + x = 1 + } + if math.Signbit(p.Y) { + y = 1 + } + if math.Signbit(p.Z) { + z = 1 + } + + if x^y^z == 0 { + return axisV + } + return axisU +} + +// exitPoint returns the UV coordinates of the point where a directed line L (represented +// by the CCW normal of this point), exits the cube face this point is derived from along +// the given axis. +func (p pointUVW) exitPoint(a axis) r2.Point { + if a == axisU { + u := -1.0 + if p.Y > 0 { + u = 1.0 + } + return r2.Point{u, (-u*p.X - p.Z) / p.Y} + } + + v := -1.0 + if p.X < 0 { + v = 1.0 + } + return r2.Point{(-v*p.Y - p.Z) / p.X, v} +} + +// clipDestination returns a score which is used to indicate if the clipped edge AB +// on the given face intersects the face at all. This function returns the score for +// the given endpoint, which is an integer ranging from 0 to 3. If the sum of the scores +// from both of the endpoints is 3 or more, then edge AB does not intersect this face. +// +// First, it clips the line segment AB to find the clipped destination B' on a given +// face. (The face is specified implicitly by expressing *all arguments* in the (u,v,w) +// coordinates of that face.) Second, it partially computes whether the segment AB +// intersects this face at all. The actual condition is fairly complicated, but it +// turns out that it can be expressed as a "score" that can be computed independently +// when clipping the two endpoints A and B. +func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Point, int) { + var uv r2.Point + + // Optimization: if B is within the safe region of the face, use it. + maxSafeUVCoord := 1 - faceClipErrorUVCoord + if b.Z > 0 { + uv = r2.Point{b.X / b.Z, b.Y / b.Z} + if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord { + return uv, 0 + } + } + + // Otherwise find the point B' where the line AB exits the face. + uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV) + + p := pointUVW(Point{r3.Vector{uv.X, uv.Y, 1.0}}) + + // Determine if the exit point B' is contained within the segment. We do this + // by computing the dot products with two inward-facing tangent vectors at A + // and B. If either dot product is negative, we say that B' is on the "wrong + // side" of that point. As the point B' moves around the great circle AB past + // the segment endpoint B, it is initially on the wrong side of B only; as it + // moves further it is on the wrong side of both endpoints; and then it is on + // the wrong side of A only. If the exit point B' is on the wrong side of + // either endpoint, we can't use it; instead the segment is clipped at the + // original endpoint B. + // + // We reject the segment if the sum of the scores of the two endpoints is 3 + // or more. Here is what that rule encodes: + // - If B' is on the wrong side of A, then the other clipped endpoint A' + // must be in the interior of AB (otherwise AB' would go the wrong way + // around the circle). There is a similar rule for A'. + // - If B' is on the wrong side of either endpoint (and therefore we must + // use the original endpoint B instead), then it must be possible to + // project B onto this face (i.e., its w-coordinate must be positive). + // This rule is only necessary to handle certain zero-length edges (A=B). + score := 0 + if p.Sub(a.Vector).Dot(aTan.Vector) < 0 { + score = 2 // B' is on wrong side of A. + } else if p.Sub(b.Vector).Dot(bTan.Vector) < 0 { + score = 1 // B' is on wrong side of B. + } + + if score > 0 { // B' is not in the interior of AB. 
+		if b.Z <= 0 {
+			score = 3 // B cannot be projected onto this face.
+		} else {
+			uv = r2.Point{b.X / b.Z, b.Y / b.Z}
+		}
+	}
+
+	return uv, score
+}
+
+// updateEndpoint returns the interval with the specified endpoint updated to
+// the given value. If the value lies beyond the opposite endpoint, nothing is
+// changed and false is returned.
+func updateEndpoint(bound r1.Interval, highEndpoint bool, value float64) (r1.Interval, bool) {
+	if !highEndpoint {
+		if bound.Hi < value {
+			return bound, false
+		}
+		if bound.Lo < value {
+			bound.Lo = value
+		}
+		return bound, true
+	}
+
+	if bound.Lo > value {
+		return bound, false
+	}
+	if bound.Hi > value {
+		bound.Hi = value
+	}
+	return bound, true
+}
+
+// clipBoundAxis returns the clipped versions of the bounding intervals for the given
+// axes for the line segment from (a0,a1) to (b0,b1) so that neither extends beyond the
+// given clip interval. negSlope is a precomputed helper variable that indicates which
+// diagonal of the bounding box is spanned by AB; it is false if AB has positive slope,
+// and true if AB has negative slope. If the clipping interval doesn't overlap the bounds,
+// false is returned.
+func clipBoundAxis(a0, b0 float64, bound0 r1.Interval, a1, b1 float64, bound1 r1.Interval,
+	negSlope bool, clip r1.Interval) (bound0c, bound1c r1.Interval, updated bool) {
+
+	if bound0.Lo < clip.Lo {
+		// If the upper bound is below the clip's lower bound, there is nothing to do.
+		if bound0.Hi < clip.Lo {
+			return bound0, bound1, false
+		}
+		// Narrow the interval's lower bound to the clip bound.
+		bound0.Lo = clip.Lo
+		if bound1, updated = updateEndpoint(bound1, negSlope, interpolateFloat64(clip.Lo, a0, b0, a1, b1)); !updated {
+			return bound0, bound1, false
+		}
+	}
+
+	if bound0.Hi > clip.Hi {
+		// If the lower bound is above the clip's upper bound, there is nothing to do.
+		if bound0.Lo > clip.Hi {
+			return bound0, bound1, false
+		}
+		// Narrow the interval's upper bound to the clip bound.
+		bound0.Hi = clip.Hi
+		if bound1, updated = updateEndpoint(bound1, !negSlope, interpolateFloat64(clip.Hi, a0, b0, a1, b1)); !updated {
+			return bound0, bound1, false
+		}
+	}
+	return bound0, bound1, true
+}
+
+// edgeIntersectsRect reports whether the edge defined by AB intersects the
+// given closed rectangle to within the error bound.
+func edgeIntersectsRect(a, b r2.Point, r r2.Rect) bool {
+	// First check whether the bounds of a Rect around AB intersects the given rect.
+	if !r.Intersects(r2.RectFromPoints(a, b)) {
+		return false
+	}
+
+	// Otherwise AB intersects the rect if and only if all four vertices of rect
+	// do not lie on the same side of the extended line AB. We test this by finding
+	// the two vertices of rect with minimum and maximum projections onto the normal
+	// of AB, and computing their dot products with the edge normal.
+	n := b.Sub(a).Ortho()
+
+	i := 0
+	if n.X >= 0 {
+		i = 1
+	}
+	j := 0
+	if n.Y >= 0 {
+		j = 1
+	}
+
+	max := n.Dot(r.VertexIJ(i, j).Sub(a))
+	min := n.Dot(r.VertexIJ(1-i, 1-j).Sub(a))
+
+	return (max >= 0) && (min <= 0)
+}
+
+// clippedEdgeBound returns the bounding rectangle of the portion of the edge defined
+// by AB intersected by clip. The resulting bound may be empty. This is a convenience
+// function built on top of clipEdgeBound.
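+//
+// A small usage sketch (a, b, and clip are assumed values):
+//
+//	bound := clippedEdgeBound(a, b, clip)
+//	if bound.IsEmpty() {
+//		// AB does not intersect clip.
+//	}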
+func clippedEdgeBound(a, b r2.Point, clip r2.Rect) r2.Rect {
+	bound := r2.RectFromPoints(a, b)
+	if b1, intersects := clipEdgeBound(a, b, clip, bound); intersects {
+		return b1
+	}
+	return r2.EmptyRect()
+}
+
+// clipEdgeBound clips an edge AB to a sequence of rectangles efficiently.
+// It represents the clipped edges by their bounding boxes rather than as a pair of
+// endpoints. Specifically, let A'B' be some portion of an edge AB, and let bound be
+// a tight bound of A'B'. This function returns the bound that is a tight bound
+// of A'B' intersected with a given rectangle. If A'B' does not intersect clip,
+// it returns false and the original bound.
+func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) {
+	// negSlope indicates which diagonal of the bounding box is spanned by AB: it
+	// is false if AB has positive slope, and true if AB has negative slope. This is
+	// used to determine which interval endpoints need to be updated each time
+	// the edge is clipped.
+	negSlope := (a.X > b.X) != (a.Y > b.Y)
+
+	b0x, b0y, up1 := clipBoundAxis(a.X, b.X, bound.X, a.Y, b.Y, bound.Y, negSlope, clip.X)
+	if !up1 {
+		return bound, false
+	}
+	b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y)
+	if !up2 {
+		return r2.Rect{X: b0x, Y: b0y}, false
+	}
+	return r2.Rect{X: b1x, Y: b1y}, true
+}
+
+// interpolateFloat64 returns a value x1 that combines a1 and b1 in the same
+// proportion that the given value x combines a and b. This function makes the
+// following guarantees:
+// - If x == a, then x1 = a1 (exactly).
+// - If x == b, then x1 = b1 (exactly).
+// - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1).
+// This requires a != b.
+func interpolateFloat64(x, a, b, a1, b1 float64) float64 {
+	// To get results that are accurate near both A and B, we interpolate
+	// starting from the closer of the two points.
+	if math.Abs(a-x) <= math.Abs(b-x) {
+		return a1 + (b1-a1)*(x-a)/(b-a)
+	}
+	return b1 + (a1-b1)*(x-b)/(a-b)
+}
+
+// FaceSegment represents an edge AB clipped to an S2 cube face. It is
+// represented by a face index and a pair of (u,v) coordinates.
+type FaceSegment struct {
+	face int
+	a, b r2.Point
+}
+
+// FaceSegments subdivides the given edge AB at every point where it crosses the
+// boundary between two S2 cube faces and returns the corresponding FaceSegments.
+// The segments are returned in order from A toward B. The input points must be
+// unit length.
+//
+// This function guarantees that the returned segments form a continuous path
+// from A to B, and that all vertices are within faceClipErrorUVDist of the
+// line AB. All vertices lie within the [-1,1]x[-1,1] cube face rectangles.
+// The results are consistent with Sign, i.e. the edge is well-defined even if
+// its endpoints are antipodal.
+// TODO(roberts): Extend the implementation of PointCross so that this is true.
+func FaceSegments(a, b Point) []FaceSegment {
+	var segment FaceSegment
+
+	// Fast path: both endpoints are on the same face.
+	var aFace, bFace int
+	aFace, segment.a.X, segment.a.Y = xyzToFaceUV(a.Vector)
+	bFace, segment.b.X, segment.b.Y = xyzToFaceUV(b.Vector)
+	if aFace == bFace {
+		segment.face = aFace
+		return []FaceSegment{segment}
+	}
+
+	// Starting at A, we follow AB from face to face until we reach the face
+	// containing B. The following code is designed to ensure that we always
+	// reach B, even in the presence of numerical errors.
+	//
+	// First we compute the normal to the plane containing A and B. This normal
+	// becomes the ultimate definition of the line AB; it is used to resolve all
+	// questions regarding where exactly the line goes. Unfortunately due to
+	// numerical errors, the line may not quite intersect the faces containing
+	// the original endpoints. We handle this by moving A and/or B slightly if
+	// necessary so that they are on faces intersected by the line AB.
+	ab := a.PointCross(b)
+
+	aFace, segment.a = moveOriginToValidFace(aFace, a, ab, segment.a)
+	bFace, segment.b = moveOriginToValidFace(bFace, b, Point{ab.Mul(-1)}, segment.b)
+
+	// Now we simply follow AB from face to face until we reach B.
+	var segments []FaceSegment
+	segment.face = aFace
+	bSaved := segment.b
+
+	for face := aFace; face != bFace; {
+		// Complete the current segment by finding the point where AB
+		// exits the current face.
+		z := faceXYZtoUVW(face, ab)
+		n := pointUVW{z.Vector}
+
+		exitAxis := n.exitAxis()
+		segment.b = n.exitPoint(exitAxis)
+		segments = append(segments, segment)
+
+		// Compute the next face intersected by AB, and translate the exit
+		// point of the current segment into the (u,v) coordinates of the
+		// next face. This becomes the first point of the next segment.
+		exitXyz := faceUVToXYZ(face, segment.b.X, segment.b.Y)
+		face = nextFace(face, segment.b, exitAxis, n, bFace)
+		exitUvw := faceXYZtoUVW(face, Point{exitXyz})
+		segment.face = face
+		segment.a = r2.Point{exitUvw.X, exitUvw.Y}
+	}
+	// Finish the last segment.
+	segment.b = bSaved
+	return append(segments, segment)
+}
+
+// moveOriginToValidFace updates the origin point to a valid face if necessary.
+// Given a line segment AB whose origin A has been projected onto a given cube
+// face, determine whether it is necessary to project A onto a different face
+// instead. This can happen because the normal of the line AB is not computed
+// exactly, so that the line AB (defined as the set of points perpendicular to
+// the normal) may not intersect the cube face containing A. Even if it does
+// intersect the face, the exit point of the line from that face may be on
+// the wrong side of A (i.e., in the direction away from B). If this happens,
+// we reproject A onto the adjacent face where the line AB approaches A most
+// closely. This moves the origin by a small amount, but never more than the
+// error tolerances.
+func moveOriginToValidFace(face int, a, ab Point, aUV r2.Point) (int, r2.Point) {
+	// Fast path: if the origin is sufficiently far inside the face, it is
+	// always safe to use it.
+	const maxSafeUVCoord = 1 - faceClipErrorUVCoord
+	if math.Max(math.Abs(aUV.X), math.Abs(aUV.Y)) <= maxSafeUVCoord {
+		return face, aUV
+	}
+
+	// Otherwise check whether the normal AB even intersects this face.
+	z := faceXYZtoUVW(face, ab)
+	n := pointUVW{z.Vector}
+	if n.intersectsFace() {
+		// Check whether the point where the line AB exits this face is on the
+		// wrong side of A (by more than the acceptable error tolerance).
+		uv := n.exitPoint(n.exitAxis())
+		exit := faceUVToXYZ(face, uv.X, uv.Y)
+		aTangent := ab.Normalize().Cross(a.Vector)
+
+		// We can use the given face.
+		if exit.Sub(a.Vector).Dot(aTangent) >= -faceClipErrorRadians {
+			return face, aUV
+		}
+	}
+
+	// Otherwise we reproject A to the nearest adjacent face. (If line AB does
+	// not pass through a given face, it must pass through all adjacent faces.)
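+	// The reprojection below moves along the axis on which A's (u,v)
+	// coordinate has the larger magnitude, since that is the face boundary
+	// that A lies closest to.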
+	var dir int
+	if math.Abs(aUV.X) >= math.Abs(aUV.Y) {
+		// U-axis
+		if aUV.X > 0 {
+			dir = 1
+		}
+		face = uvwFace(face, 0, dir)
+	} else {
+		// V-axis
+		if aUV.Y > 0 {
+			dir = 1
+		}
+		face = uvwFace(face, 1, dir)
+	}
+
+	aUV.X, aUV.Y = validFaceXYZToUV(face, a.Vector)
+	aUV.X = math.Max(-1.0, math.Min(1.0, aUV.X))
+	aUV.Y = math.Max(-1.0, math.Min(1.0, aUV.Y))
+
+	return face, aUV
+}
+
+// nextFace returns the next face that should be visited by FaceSegments, given that
+// we have just visited face and we are following the line AB (represented
+// by its normal N in the (u,v,w) coordinates of that face). The other
+// arguments include the point where AB exits face, the corresponding
+// exit axis, and the target face containing the destination point B.
+func nextFace(face int, exit r2.Point, axis axis, n pointUVW, targetFace int) int {
+	// This bit is to work around C++ cleverly casting bools to ints for you.
+	exitA := exit.X
+	exit1MinusA := exit.Y
+
+	if axis == axisV {
+		exitA = exit.Y
+		exit1MinusA = exit.X
+	}
+	exitAPos := 0
+	if exitA > 0 {
+		exitAPos = 1
+	}
+	exit1MinusAPos := 0
+	if exit1MinusA > 0 {
+		exit1MinusAPos = 1
+	}
+
+	// We return the face that is adjacent to the exit point along the given
+	// axis. If line AB exits *exactly* through a corner of the face, there are
+	// two possible next faces. If one is the target face containing B, then
+	// we guarantee that we advance to that face directly.
+	//
+	// The three conditions below check that (1) AB exits approximately through
+	// a corner, (2) the adjacent face along the non-exit axis is the target
+	// face, and (3) AB exits *exactly* through the corner. (The sumEqual
+	// code checks whether the dot product of (u,v,1) and n is exactly zero.)
+	if math.Abs(exit1MinusA) == 1 &&
+		uvwFace(face, int(1-axis), exit1MinusAPos) == targetFace &&
+		sumEqual(exit.X*n.X, exit.Y*n.Y, -n.Z) {
+		return targetFace
+	}
+
+	// Otherwise return the face that is adjacent to the exit point in the
+	// direction of the exit axis.
+	return uvwFace(face, int(axis), exitAPos)
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/edge_crosser.go b/vendor/github.com/blevesearch/geo/s2/edge_crosser.go
new file mode 100644
index 0000000..69c6da6
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/edge_crosser.go
@@ -0,0 +1,227 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+)
+
+// EdgeCrosser allows edges to be efficiently tested for intersection with a
+// given fixed edge AB. It is especially efficient when testing for
+// intersection with an edge chain connecting vertices v0, v1, v2, ...
+//
+// Example usage:
+//
+//	func CountIntersections(a, b Point, edges []Edge) int {
+//		count := 0
+//		crosser := NewEdgeCrosser(a, b)
+//		for _, edge := range edges {
+//			if crosser.CrossingSign(edge.V0, edge.V1) != DoNotCross {
+//				count++
+//			}
+//		}
+//		return count
+//	}
+//
+type EdgeCrosser struct {
+	a   Point
+	b   Point
+	aXb Point
+
+	// To reduce the number of calls to expensiveSign, we compute an
+	// outward-facing tangent at A and B if necessary. If the plane
+	// perpendicular to one of these tangents separates AB from CD (i.e., one
+	// edge on each side) then there is no intersection.
+	aTangent Point // Outward-facing tangent at A.
+	bTangent Point // Outward-facing tangent at B.
+
+	// The fields below are updated for each vertex in the chain.
+	c   Point     // Previous vertex in the vertex chain.
+	acb Direction // The orientation of triangle ACB.
+}
+
+// NewEdgeCrosser returns an EdgeCrosser with the fixed edge AB.
+func NewEdgeCrosser(a, b Point) *EdgeCrosser {
+	norm := a.PointCross(b)
+	return &EdgeCrosser{
+		a:        a,
+		b:        b,
+		aXb:      Point{a.Cross(b.Vector)},
+		aTangent: Point{a.Cross(norm.Vector)},
+		bTangent: Point{norm.Cross(b.Vector)},
+	}
+}
+
+// CrossingSign reports whether the edge AB intersects the edge CD. If any two
+// vertices from different edges are the same, returns MaybeCross. If either edge
+// is degenerate (A == B or C == D), returns either DoNotCross or MaybeCross.
+//
+// Properties of CrossingSign:
+//
+// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
+// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
+// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
+// (4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
+//
+// Note that if you want to check an edge against a chain of other edges,
+// it is slightly more efficient to use the single-argument version
+// ChainCrossingSign below.
+func (e *EdgeCrosser) CrossingSign(c, d Point) Crossing {
+	if c != e.c {
+		e.RestartAt(c)
+	}
+	return e.ChainCrossingSign(d)
+}
+
+// EdgeOrVertexCrossing reports whether CrossingSign(c, d) == Cross, or AB and
+// CD share a vertex and VertexCrossing(a, b, c, d) is true.
+//
+// This method extends the concept of a "crossing" to the case where AB
+// and CD have a vertex in common. The two edges may or may not cross,
+// according to the rules defined in VertexCrossing. The rules
+// are designed so that point containment tests can be implemented simply
+// by counting edge crossings. Similarly, determining whether one edge
+// chain crosses another edge chain can be implemented by counting.
+func (e *EdgeCrosser) EdgeOrVertexCrossing(c, d Point) bool {
+	if c != e.c {
+		e.RestartAt(c)
+	}
+	return e.EdgeOrVertexChainCrossing(d)
+}
+
+// NewChainEdgeCrosser is a convenience constructor that uses AB as the fixed edge,
+// and C as the first vertex of the vertex chain (equivalent to calling RestartAt(c)).
+//
+// You don't need to use this or any of the chain functions unless you're trying to
+// squeeze out every last drop of performance. Essentially all you are saving is a test
+// whether the first vertex of the current edge is the same as the second vertex of the
+// previous edge.
+func NewChainEdgeCrosser(a, b, c Point) *EdgeCrosser {
+	e := NewEdgeCrosser(a, b)
+	e.RestartAt(c)
+	return e
+}
+
+// RestartAt sets the current point of the edge crosser to be c.
+// Call this method when your chain 'jumps' to a new place.
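+//
+// For example (an illustrative sketch; chain1 and chain2 are hypothetical
+// slices of unit-length Points):
+//
+//	crosser := NewEdgeCrosser(a, b)
+//	crosser.RestartAt(chain1[0])
+//	for _, v := range chain1[1:] {
+//		_ = crosser.ChainCrossingSign(v)
+//	}
+//	crosser.RestartAt(chain2[0]) // the chain 'jumps' to a new place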
+func (e *EdgeCrosser) RestartAt(c Point) {
+	e.c = c
+	e.acb = -triageSign(e.a, e.b, e.c)
+}
+
+// ChainCrossingSign is like CrossingSign, but uses the last vertex passed to one of
+// the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) ChainCrossingSign(d Point) Crossing {
+	// For there to be an edge crossing, the triangles ACB, CBD, BDA, DAC must
+	// all be oriented the same way (CW or CCW). We keep the orientation of ACB
+	// as part of our state. When each new point D arrives, we compute the
+	// orientation of BDA and check whether it matches ACB. This checks whether
+	// the points C and D are on opposite sides of the great circle through AB.
+
+	// Recall that triageSign is invariant with respect to rotating its
+	// arguments, i.e. ABD has the same orientation as BDA.
+	bda := triageSign(e.a, e.b, d)
+	if e.acb == -bda && bda != Indeterminate {
+		// The most common case -- triangles have opposite orientations. Save the
+		// current vertex D as the next vertex C, and also save the orientation of
+		// the new triangle ACB (which is opposite to the current triangle BDA).
+		e.c = d
+		e.acb = -bda
+		return DoNotCross
+	}
+	return e.crossingSign(d, bda)
+}
+
+// EdgeOrVertexChainCrossing is like EdgeOrVertexCrossing, but uses the last vertex
+// passed to one of the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool {
+	// We need to copy e.c since it is clobbered by ChainCrossingSign.
+	c := e.c
+	switch e.ChainCrossingSign(d) {
+	case DoNotCross:
+		return false
+	case Cross:
+		return true
+	}
+	return VertexCrossing(e.a, e.b, c, d)
+}
+
+// crossingSign handles the slow path of CrossingSign.
+func (e *EdgeCrosser) crossingSign(d Point, bda Direction) Crossing {
+	// Compute the actual result, and then save the current vertex D as the next
+	// vertex C, and save the orientation of the next triangle ACB (which is
+	// opposite to the current triangle BDA).
+	defer func() {
+		e.c = d
+		e.acb = -bda
+	}()
+
+	// At this point, a very common situation is that A,B,C,D are four points on
+	// a line such that AB does not overlap CD. (For example, this happens when
+	// a line or curve is sampled finely, or when geometry is constructed by
+	// computing the union of S2CellIds.) Most of the time, we can determine
+	// that AB and CD do not intersect using the two outward-facing
+	// tangents at A and B (parallel to AB) and testing whether AB and CD are on
+	// opposite sides of the plane perpendicular to one of these tangents. This
+	// is moderately expensive but still much cheaper than expensiveSign.
+
+	// The error in RobustCrossProd is insignificant. The maximum error in
+	// the call to CrossProd (i.e., the maximum norm of the error vector) is
+	// (0.5 + 1/sqrt(3)) * dblEpsilon. The maximum error in each call to
+	// DotProd below is dblEpsilon. (There is also a small relative error
+	// term that is insignificant because we are comparing the result against a
+	// constant that is very close to zero.)
+	maxError := (1.5 + 1/math.Sqrt(3)) * dblEpsilon
+	if (e.c.Dot(e.aTangent.Vector) > maxError && d.Dot(e.aTangent.Vector) > maxError) || (e.c.Dot(e.bTangent.Vector) > maxError && d.Dot(e.bTangent.Vector) > maxError) {
+		return DoNotCross
+	}
+
+	// Otherwise, eliminate the cases where two vertices from different edges are
+	// equal. (These cases could be handled in the code below, but we would rather
+	// avoid calling expensiveSign if possible.)
+	if e.a == e.c || e.a == d || e.b == e.c || e.b == d {
+		return MaybeCross
+	}
+
+	// Eliminate the cases where an input edge is degenerate. (Note that in
+	// most cases, if CD is degenerate then this method is not even called
+	// because acb and bda have different signs.)
+	if e.a == e.b || e.c == d {
+		return DoNotCross
+	}
+
+	// Otherwise it's time to break out the big guns.
+	if e.acb == Indeterminate {
+		e.acb = -expensiveSign(e.a, e.b, e.c)
+	}
+	if bda == Indeterminate {
+		bda = expensiveSign(e.a, e.b, d)
+	}
+
+	if bda != e.acb {
+		return DoNotCross
+	}
+
+	cbd := -RobustSign(e.c, d, e.b)
+	if cbd != e.acb {
+		return DoNotCross
+	}
+	dac := RobustSign(e.c, d, e.a)
+	if dac != e.acb {
+		return DoNotCross
+	}
+	return Cross
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/edge_crossings.go b/vendor/github.com/blevesearch/geo/s2/edge_crossings.go
new file mode 100644
index 0000000..a98ec76
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/edge_crossings.go
@@ -0,0 +1,396 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+const (
+	// intersectionError can be set somewhat arbitrarily, because the algorithm
+	// uses more precision if necessary in order to achieve the specified error.
+	// The only strict requirement is that intersectionError >= dblEpsilon
+	// radians. However, using a larger error tolerance makes the algorithm more
+	// efficient because it reduces the number of cases where exact arithmetic is
+	// needed.
+	intersectionError = s1.Angle(8 * dblError)
+
+	// intersectionMergeRadius is used to ensure that intersection points that
+	// are supposed to be coincident are merged back together into a single
+	// vertex. This is required in order for various polygon operations (union,
+	// intersection, etc) to work correctly. It is twice the intersection error
+	// because two coincident intersection points might have errors in
+	// opposite directions.
+	intersectionMergeRadius = 2 * intersectionError
+)
+
+// A Crossing indicates how edges cross.
+type Crossing int
+
+const (
+	// Cross means the edges cross.
+	Cross Crossing = iota
+	// MaybeCross means two vertices from different edges are the same.
+	MaybeCross
+	// DoNotCross means the edges do not cross.
+	DoNotCross
+)
+
+func (c Crossing) String() string {
+	switch c {
+	case Cross:
+		return "Cross"
+	case MaybeCross:
+		return "MaybeCross"
+	case DoNotCross:
+		return "DoNotCross"
+	default:
+		return fmt.Sprintf("(BAD CROSSING %d)", c)
+	}
+}
+
+// CrossingSign reports whether the edge AB intersects the edge CD.
+// If AB crosses CD at a point that is interior to both edges, Cross is returned.
+// If any two vertices from different edges are the same it returns MaybeCross.
+// Otherwise it returns DoNotCross.
+// If either edge is degenerate (A == B or C == D), the return value is MaybeCross
+// if two vertices from different edges are the same and DoNotCross otherwise.
+//
+// Properties of CrossingSign:
+//
+// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
+// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
+// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
+// (4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
+//
+// This method implements an exact, consistent perturbation model such
+// that no three points are ever considered to be collinear. This means
+// that even if you have 4 points A, B, C, D that lie exactly in a line
+// (say, around the equator), C and D will be treated as being slightly to
+// one side or the other of AB. This is done in a way such that the
+// results are always consistent (see RobustSign).
+func CrossingSign(a, b, c, d Point) Crossing {
+	crosser := NewChainEdgeCrosser(a, b, c)
+	return crosser.ChainCrossingSign(d)
+}
+
+// VertexCrossing reports whether two edges "cross" in such a way that point-in-polygon
+// containment tests can be implemented by counting the number of edge crossings.
+//
+// Given two edges AB and CD where at least two vertices are identical
+// (i.e. CrossingSign(a,b,c,d) == MaybeCross), the basic rule is that a "crossing"
+// occurs if AB is encountered after CD during a CCW sweep around the shared
+// vertex starting from a fixed reference point.
+//
+// Note that according to this rule, if AB crosses CD then in general CD
+// does not cross AB. However, this leads to the correct result when
+// counting polygon edge crossings. For example, suppose that A,B,C are
+// three consecutive vertices of a CCW polygon. If we now consider the edge
+// crossings of a segment BP as P sweeps around B, the crossing number
+// changes parity exactly when BP crosses BA or BC.
+//
+// Useful properties of VertexCrossing (VC):
+//
+// (1) VC(a,a,c,d) == VC(a,b,c,c) == false
+// (2) VC(a,b,a,b) == VC(a,b,b,a) == true
+// (3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
+// (4) If exactly one of a,b equals one of c,d, then exactly one of
+//     VC(a,b,c,d) and VC(c,d,a,b) is true
+//
+// It is an error to call this method with 4 distinct vertices.
+func VertexCrossing(a, b, c, d Point) bool {
+	// If A == B or C == D there is no intersection. We need to check this
+	// case first in case 3 or more input points are identical.
+	if a == b || c == d {
+		return false
+	}
+
+	// If any other pair of vertices is equal, there is a crossing if and only
+	// if OrderedCCW indicates that the edge AB is further CCW around the
+	// shared vertex O (either A or B) than the edge CD, starting from an
+	// arbitrary fixed reference point.
+
+	// Optimization: if AB=CD or AB=DC, we can avoid most of the calculations.
+	switch {
+	case a == c:
+		return (b == d) || OrderedCCW(Point{a.Ortho()}, d, b, a)
+	case b == d:
+		return OrderedCCW(Point{b.Ortho()}, c, a, b)
+	case a == d:
+		return (b == c) || OrderedCCW(Point{a.Ortho()}, c, b, a)
+	case b == c:
+		return OrderedCCW(Point{b.Ortho()}, d, a, b)
+	}
+
+	return false
+}
+
+// EdgeOrVertexCrossing is a convenience function that calls CrossingSign to
+// handle cases where all four vertices are distinct, and VertexCrossing to
+// handle cases where two or more vertices are the same. This defines a crossing
+// function such that point-in-polygon containment tests can be implemented
+// by simply counting edge crossings.
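+//
+// For example (an illustrative point-in-polygon sketch; loop is a
+// hypothetical slice of unit-length vertices, and ref is assumed to be a
+// point known to lie outside the loop):
+//
+//	inside := false
+//	for i := range loop {
+//		if EdgeOrVertexCrossing(ref, p, loop[i], loop[(i+1)%len(loop)]) {
+//			inside = !inside
+//		}
+//	}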
+func EdgeOrVertexCrossing(a, b, c, d Point) bool {
+	switch CrossingSign(a, b, c, d) {
+	case DoNotCross:
+		return false
+	case Cross:
+		return true
+	default:
+		return VertexCrossing(a, b, c, d)
+	}
+}
+
+// Intersection returns the intersection point of two edges AB and CD that cross
+// (CrossingSign(a,b,c,d) == Cross).
+//
+// Useful properties of Intersection:
+//
+// (1) Intersection(b,a,c,d) == Intersection(a,b,d,c) == Intersection(a,b,c,d)
+// (2) Intersection(c,d,a,b) == Intersection(a,b,c,d)
+//
+// The returned intersection point X is guaranteed to be very close to the
+// true intersection point of AB and CD, even if the edges intersect at a
+// very small angle.
+func Intersection(a0, a1, b0, b1 Point) Point {
+	// It is difficult to compute the intersection point of two edges accurately
+	// when the angle between the edges is very small. Previously we handled
+	// this by only guaranteeing that the returned intersection point is within
+	// intersectionError of each edge. However, this means that when the edges
+	// cross at a very small angle, the computed result may be very far from the
+	// true intersection point.
+	//
+	// Instead this function now guarantees that the result is always within
+	// intersectionError of the true intersection. This requires using more
+	// sophisticated techniques and in some cases extended precision.
+	//
+	// - intersectionStable computes the intersection point using
+	//   projection and interpolation, taking care to minimize cancellation
+	//   error.
+	//
+	// - intersectionExact computes the intersection point using precise
+	//   arithmetic and converts the final result back to a Point.
+	pt, ok := intersectionStable(a0, a1, b0, b1)
+	if !ok {
+		pt = intersectionExact(a0, a1, b0, b1)
+	}
+
+	// Make sure the intersection point is on the correct side of the sphere.
+	// Since all vertices are unit length, and edges are less than 180 degrees,
+	// (a0 + a1) and (b0 + b1) both have positive dot product with the
+	// intersection point. We use the sum of all vertices to make sure that the
+	// result is unchanged when the edges are swapped or reversed.
+	if pt.Dot((a0.Add(a1.Vector)).Add(b0.Add(b1.Vector))) < 0 {
+		pt = Point{pt.Mul(-1)}
+	}
+
+	return pt
+}
+
+// robustNormalWithLength computes the cross product of two vectors, normalized
+// to be unit length. It also returns the length of the cross product before
+// normalization, which is useful for estimating the amount of error in the
+// result. For numerical stability, the vectors should both be approximately
+// unit length.
+func robustNormalWithLength(x, y r3.Vector) (r3.Vector, float64) {
+	var pt r3.Vector
+	// This computes 2 * (x.Cross(y)), but has much better numerical
+	// stability when x and y are unit length.
+	tmp := x.Sub(y).Cross(x.Add(y))
+	length := tmp.Norm()
+	if length != 0 {
+		pt = tmp.Mul(1 / length)
+	}
+	return pt, 0.5 * length // Since tmp == 2 * (x.Cross(y))
+}
+
+// intersectionSimple is not used by the C++ implementation, so it is skipped here.
+
+// projection returns the projection of aNorm onto X (x.Dot(aNorm)), and a bound
+// on the error in the result. aNorm is not necessarily unit length.
+//
+// The remaining parameters (the length of aNorm (aNormLen) and the edge endpoints
+// a0 and a1) allow this dot product to be computed more accurately and efficiently.
+func projection(x, aNorm r3.Vector, aNormLen float64, a0, a1 Point) (proj, bound float64) {
+	// The error in the dot product is proportional to the lengths of the input
+	// vectors, so rather than using x itself (a unit-length vector) we use
+	// the vectors from x to the closer of the two edge endpoints. This
+	// typically reduces the error by a huge factor.
+	x0 := x.Sub(a0.Vector)
+	x1 := x.Sub(a1.Vector)
+	x0Dist2 := x0.Norm2()
+	x1Dist2 := x1.Norm2()
+
+	// If both distances are the same, we need to be careful to choose one
+	// endpoint deterministically so that the result does not change if the
+	// order of the endpoints is reversed.
+	var dist float64
+	if x0Dist2 < x1Dist2 || (x0Dist2 == x1Dist2 && x0.Cmp(x1) == -1) {
+		dist = math.Sqrt(x0Dist2)
+		proj = x0.Dot(aNorm)
+	} else {
+		dist = math.Sqrt(x1Dist2)
+		proj = x1.Dot(aNorm)
+	}
+
+	// This calculation bounds the error from all sources: the computation of
+	// the normal, the subtraction of one endpoint, and the dot product itself.
+	// dblError appears because the input points are assumed to be
+	// normalized in double precision.
+	//
+	// For reference, the bounds that went into this calculation are:
+	// ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblError) * epsilon
+	// |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon
+	// ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon
+	bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblError)*dist + 1.5*math.Abs(proj)) * epsilon
+	return proj, bound
+}
+
+// compareEdges reports whether (a0,a1) is less than (b0,b1) with respect to a total
+// ordering on edges that is invariant under edge reversals.
+func compareEdges(a0, a1, b0, b1 Point) bool {
+	if a0.Cmp(a1.Vector) != -1 {
+		a0, a1 = a1, a0
+	}
+	if b0.Cmp(b1.Vector) != -1 {
+		b0, b1 = b1, b0
+	}
+	return a0.Cmp(b0.Vector) == -1 || (a0 == b0 && b0.Cmp(b1.Vector) == -1)
+}
+
+// intersectionStable returns the intersection point of the edges (a0,a1) and
+// (b0,b1) if it can be computed to within an error of at most intersectionError
+// by this function.
+//
+// The intersection point is not guaranteed to have the correct sign because we
+// choose to use the longer of the two edges first. The sign is corrected by
+// Intersection.
+func intersectionStable(a0, a1, b0, b1 Point) (Point, bool) {
+	// Sort the two edges so that (a0,a1) is longer, breaking ties in a
+	// deterministic way that does not depend on the ordering of the endpoints.
+	// This is desirable for two reasons:
+	// - So that the result doesn't change when edges are swapped or reversed.
+	// - It reduces error, since the first edge is used to compute the edge
+	//   normal (where a longer edge means less error), and the second edge
+	//   is used for interpolation (where a shorter edge means less error).
+	aLen2 := a1.Sub(a0.Vector).Norm2()
+	bLen2 := b1.Sub(b0.Vector).Norm2()
+	if aLen2 < bLen2 || (aLen2 == bLen2 && compareEdges(a0, a1, b0, b1)) {
+		return intersectionStableSorted(b0, b1, a0, a1)
+	}
+	return intersectionStableSorted(a0, a1, b0, b1)
+}
+
+// intersectionStableSorted is a helper function for intersectionStable.
+// It expects that the edges (a0,a1) and (b0,b1) have been sorted so that
+// the first edge passed in is longer.
+func intersectionStableSorted(a0, a1, b0, b1 Point) (Point, bool) {
+	var pt Point
+
+	// Compute the normal of the plane through (a0, a1) in a stable way.
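+	// Note that (a0-a1) x (a0+a1) == 2 * (a0 x a1) exactly; the difference
+	// form suffers far less cancellation error when a0 and a1 are nearly
+	// parallel, which is why it is used here.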
+	aNorm := a0.Sub(a1.Vector).Cross(a0.Add(a1.Vector))
+	aNormLen := aNorm.Norm()
+	bLen := b1.Sub(b0.Vector).Norm()
+
+	// Compute the projection (i.e., signed distance) of b0 and b1 onto the
+	// plane through (a0, a1). Distances are scaled by the length of aNorm.
+	b0Dist, b0Error := projection(b0.Vector, aNorm, aNormLen, a0, a1)
+	b1Dist, b1Error := projection(b1.Vector, aNorm, aNormLen, a0, a1)
+
+	// The total distance from b0 to b1 measured perpendicularly to (a0,a1) is
+	// |b0Dist - b1Dist|. Note that b0Dist and b1Dist generally have
+	// opposite signs because b0 and b1 are on opposite sides of (a0, a1). The
+	// code below finds the intersection point by interpolating along the edge
+	// (b0, b1) to a fractional distance of b0Dist / (b0Dist - b1Dist).
+	//
+	// It can be shown that the maximum error in the interpolation fraction is
+	//
+	//	(b0Dist * b1Error - b1Dist * b0Error) / (distSum * (distSum - errorSum))
+	//
+	// We save ourselves some work by scaling the result and the error bound by
+	// "distSum", since the result is normalized to be unit length anyway.
+	distSum := math.Abs(b0Dist - b1Dist)
+	errorSum := b0Error + b1Error
+	if distSum <= errorSum {
+		return pt, false // Error is unbounded in this case.
+	}
+
+	x := b1.Mul(b0Dist).Sub(b0.Mul(b1Dist))
+	err := bLen*math.Abs(b0Dist*b1Error-b1Dist*b0Error)/
+		(distSum-errorSum) + 2*distSum*epsilon
+
+	// Finally we normalize the result, compute the corresponding error, and
+	// check whether the total error is acceptable.
+	xLen := x.Norm()
+	maxError := intersectionError
+	if err > (float64(maxError)-epsilon)*xLen {
+		return pt, false
+	}
+
+	return Point{x.Mul(1 / xLen)}, true
+}
+
+// intersectionExact returns the intersection point of (a0, a1) and (b0, b1)
+// using precise arithmetic. Note that the result is not exact because it is
+// rounded down to double precision at the end. Also, the intersection point
+// is not guaranteed to have the correct sign (i.e., the return value may need
+// to be negated).
+func intersectionExact(a0, a1, b0, b1 Point) Point {
+	// Since we are using precise arithmetic, we don't need to worry about
+	// numerical stability.
+	a0P := r3.PreciseVectorFromVector(a0.Vector)
+	a1P := r3.PreciseVectorFromVector(a1.Vector)
+	b0P := r3.PreciseVectorFromVector(b0.Vector)
+	b1P := r3.PreciseVectorFromVector(b1.Vector)
+	aNormP := a0P.Cross(a1P)
+	bNormP := b0P.Cross(b1P)
+	xP := aNormP.Cross(bNormP)
+
+	// The final Normalize() call is done in double precision, which creates a
+	// directional error of up to 2*dblError. (Precise conversion and Normalize()
+	// each contribute up to dblError of directional error.)
+	x := xP.Vector()
+
+	if x == (r3.Vector{}) {
+		// The two edges are exactly collinear, but we still consider them to be
+		// "crossing" because of simulation of simplicity. Out of the four
+		// endpoints, exactly two lie in the interior of the other edge. Of
+		// those two we return the one that is lexicographically smallest.
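+		// Start with a sentinel that compares greater than any valid unit
+		// vector, so that the comparisons below accept the first qualifying
+		// endpoint.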
+		x = r3.Vector{10, 10, 10} // Greater than any valid S2Point
+
+		aNorm := Point{aNormP.Vector()}
+		bNorm := Point{bNormP.Vector()}
+		if OrderedCCW(b0, a0, b1, bNorm) && a0.Cmp(x) == -1 {
+			return a0
+		}
+		if OrderedCCW(b0, a1, b1, bNorm) && a1.Cmp(x) == -1 {
+			return a1
+		}
+		if OrderedCCW(a0, b0, a1, aNorm) && b0.Cmp(x) == -1 {
+			return b0
+		}
+		if OrderedCCW(a0, b1, a1, aNorm) && b1.Cmp(x) == -1 {
+			return b1
+		}
+	}
+
+	return Point{x}
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/edge_distances.go b/vendor/github.com/blevesearch/geo/s2/edge_distances.go
new file mode 100644
index 0000000..ca197af
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/edge_distances.go
@@ -0,0 +1,408 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file defines a collection of methods for computing the distance to an edge,
+// interpolating along an edge, projecting points onto edges, etc.
+
+import (
+	"math"
+
+	"github.com/golang/geo/s1"
+)
+
+// DistanceFromSegment returns the distance of point X from line segment AB.
+// The points are expected to be normalized. The result is very accurate for small
+// distances but may have some numerical error if the distance is large
+// (approximately pi/2 or greater). The case A == B is handled correctly.
+func DistanceFromSegment(x, a, b Point) s1.Angle {
+	var minDist s1.ChordAngle
+	minDist, _ = updateMinDistance(x, a, b, minDist, true)
+	return minDist.Angle()
+}
+
+// IsDistanceLess reports whether the distance from X to the edge AB is less
+// than limit. (For less than or equal to, specify limit.Successor()).
+// This method is faster than DistanceFromSegment(). If you want to
+// compare against a fixed s1.Angle, you should convert it to an s1.ChordAngle
+// once and save the value, since this conversion is relatively expensive.
+func IsDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
+	_, less := UpdateMinDistance(x, a, b, limit)
+	return less
+}
+
+// UpdateMinDistance checks if the distance from X to the edge AB is less
+// than minDist, and if so, returns the updated value and true.
+// The case A == B is handled correctly.
+//
+// Use this method when you want to compute many distances and keep track of
+// the minimum. It is significantly faster than using DistanceFromSegment
+// because (1) using s1.ChordAngle is much faster than s1.Angle, and (2) it
+// can save a lot of work by not actually computing the distance when it is
+// obviously larger than the current minimum.
+func UpdateMinDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+	return updateMinDistance(x, a, b, minDist, false)
+}
+
+// UpdateMaxDistance checks if the distance from X to the edge AB is greater
+// than maxDist, and if so, returns the updated value and true.
+// Otherwise it returns false. The case A == B is handled correctly.
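+//
+// For example (an illustrative sketch of the update pattern; x, a and b are
+// hypothetical unit-length Points):
+//
+//	maxDist := s1.NegativeChordAngle
+//	if d, ok := UpdateMaxDistance(x, a, b, maxDist); ok {
+//		maxDist = d
+//	}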
+func UpdateMaxDistance(x, a, b Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) {
+	dist := maxChordAngle(ChordAngleBetweenPoints(x, a), ChordAngleBetweenPoints(x, b))
+	if dist > s1.RightChordAngle {
+		dist, _ = updateMinDistance(Point{x.Mul(-1)}, a, b, dist, true)
+		dist = s1.StraightChordAngle - dist
+	}
+	if maxDist < dist {
+		return dist, true
+	}
+
+	return maxDist, false
+}
+
+// IsInteriorDistanceLess reports whether the minimum distance from X to the edge
+// AB is attained at an interior point of AB (i.e., not an endpoint), and that
+// distance is less than limit. (Specify limit.Successor() for less than or equal to).
+func IsInteriorDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
+	_, less := UpdateMinInteriorDistance(x, a, b, limit)
+	return less
+}
+
+// UpdateMinInteriorDistance reports whether the minimum distance from X to AB
+// is attained at an interior point of AB (i.e., not an endpoint), and that distance
+// is less than minDist. If so, the value of minDist is updated and true is returned.
+// Otherwise it is unchanged and returns false.
+func UpdateMinInteriorDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+	return interiorDist(x, a, b, minDist, false)
+}
+
+// Project returns the point along the edge AB that is closest to the point X.
+// The fractional distance of this point along the edge AB can be obtained
+// using DistanceFraction.
+//
+// This requires that all points are unit length.
+func Project(x, a, b Point) Point {
+	aXb := a.PointCross(b)
+	// Find the closest point to X along the great circle through AB.
+	p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Vector.Norm2()))
+
+	// If this point is on the edge AB, then it's the closest point.
+	if Sign(aXb, a, Point{p}) && Sign(Point{p}, b, aXb) {
+		return Point{p.Normalize()}
+	}
+
+	// Otherwise, the closest point is either A or B.
+	if x.Sub(a.Vector).Norm2() <= x.Sub(b.Vector).Norm2() {
+		return a
+	}
+	return b
+}
+
+// DistanceFraction returns the distance ratio of the point X along an edge AB.
+// If X is on the line segment AB, this is the fraction T such
+// that X == Interpolate(T, A, B).
+//
+// This requires that A and B are distinct.
+func DistanceFraction(x, a, b Point) float64 {
+	d0 := x.Angle(a.Vector)
+	d1 := x.Angle(b.Vector)
+	return float64(d0 / (d0 + d1))
+}
+
+// Interpolate returns the point X along the line segment AB whose distance from A
+// is the given fraction "t" of the distance AB. Does NOT require that "t" be
+// between 0 and 1. Note that all distances are measured on the surface of
+// the sphere, so this is more complicated than just computing (1-t)*a + t*b
+// and normalizing the result.
+func Interpolate(t float64, a, b Point) Point {
+	if t == 0 {
+		return a
+	}
+	if t == 1 {
+		return b
+	}
+	ab := a.Angle(b.Vector)
+	return InterpolateAtDistance(s1.Angle(t)*ab, a, b)
+}
+
+// InterpolateAtDistance returns the point X along the line segment AB whose
+// distance from A is the angle ax.
+func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
+	aRad := ax.Radians()
+
+	// Use PointCross to compute the tangent vector at A towards B. The
+	// result is always perpendicular to A, even if A=B or A=-B, but it is not
+	// necessarily unit length. (We effectively normalize it below.)
+	normal := a.PointCross(b)
+	tangent := normal.Vector.Cross(a.Vector)
+
+	// Now compute the appropriate linear combination of A and "tangent". With
+	// infinite precision the result would always be unit length, but we
+	// normalize it anyway to ensure that the error is within acceptable bounds.
+	// (Otherwise errors can build up when the result of one interpolation is
+	// fed into another interpolation.)
+	return Point{(a.Mul(math.Cos(aRad)).Add(tangent.Mul(math.Sin(aRad) / tangent.Norm()))).Normalize()}
+}
+
+// minUpdateDistanceMaxError returns the maximum error in the result of
+// UpdateMinDistance (and the associated functions such as
+// UpdateMinInteriorDistance, IsDistanceLess, etc), assuming that all
+// input points are normalized to within the bounds guaranteed by r3.Vector's
+// Normalize. The error can be added or subtracted from an s1.ChordAngle
+// using its Expanded method.
+func minUpdateDistanceMaxError(dist s1.ChordAngle) float64 {
+	// There are two cases for the maximum error in UpdateMinDistance(),
+	// depending on whether the closest point is interior to the edge.
+	return math.Max(minUpdateInteriorDistanceMaxError(dist), dist.MaxPointError())
+}
+
+// minUpdateInteriorDistanceMaxError returns the maximum error in the result of
+// UpdateMinInteriorDistance, assuming that all input points are normalized
+// to within the bounds guaranteed by Point's Normalize. The error can be added
+// or subtracted from an s1.ChordAngle using its Expanded method.
+//
+// Note that accuracy goes down as the distance approaches 0 degrees or 180
+// degrees (for different reasons). Near 0 degrees the error is acceptable
+// for all practical purposes (about 1.2e-15 radians ~= 8 nanometers). For
+// exactly antipodal points the maximum error is quite high (0.5 meters),
+// but this error drops rapidly as the points move away from antipodality
+// (approximately 1 millimeter for points that are 50 meters from antipodal,
+// and 1 micrometer for points that are 50km from antipodal).
+//
+// TODO(roberts): Currently the error bound does not hold for edges whose endpoints
+// are antipodal to within about 1e-15 radians (less than 1 micron). This could
+// be fixed by extending PointCross to use higher precision when necessary.
+func minUpdateInteriorDistanceMaxError(dist s1.ChordAngle) float64 {
+	// If a point is more than 90 degrees from an edge, then the minimum
+	// distance is always to one of the endpoints, not to the edge interior.
+	if dist >= s1.RightChordAngle {
+		return 0.0
+	}
+
+	// This bound includes all sources of error, assuming that the input points
+	// are normalized. a and b are components of chord length that are
+	// perpendicular and parallel to a plane containing the edge respectively.
+	b := math.Min(1.0, 0.5*float64(dist))
+	a := math.Sqrt(b * (2 - b))
+	return ((2.5+2*math.Sqrt(3)+8.5*a)*a +
+		(2+2*math.Sqrt(3)/3+6.5*(1-b))*b +
+		(23+16/math.Sqrt(3))*dblEpsilon) * dblEpsilon
+}
+
+// updateMinDistance computes the distance from a point X to a line segment AB.
+// If the distance is less than the given minDist, or alwaysUpdate is true, it
+// returns the updated value and true; otherwise it returns minDist and false.
+func updateMinDistance(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
+	if d, ok := interiorDist(x, a, b, minDist, alwaysUpdate); ok {
+		// Minimum distance is attained along the edge interior.
+		return d, true
+	}
+
+	// Otherwise the minimum distance is to one of the endpoints.
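+	// An s1.ChordAngle is represented as a squared chord length, so the
+	// squared Euclidean distances computed below can be compared to it directly.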
+	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+	dist := s1.ChordAngle(math.Min(xa2, xb2))
+	if !alwaysUpdate && dist >= minDist {
+		return minDist, false
+	}
+	return dist, true
+}
+
+// interiorDist returns the shortest distance from point x to edge ab, assuming
+// that the closest point to X is interior to AB. If the closest point is not
+// interior to AB, interiorDist returns (minDist, false). If alwaysUpdate is set to
+// false, the distance is only updated when the new value is less than the given
+// minDist.
+func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
+	// Chord distance of x to both end points a and b.
+	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+
+	// The closest point on AB could either be one of the two vertices (the
+	// vertex case) or in the interior (the interior case). Let C = A x B.
+	// If X is in the spherical wedge extending from A to B around the axis
+	// through C, then we are in the interior case. Otherwise we are in the
+	// vertex case.
+	//
+	// Check whether we might be in the interior case. For this to be true, XAB
+	// and XBA must both be acute angles. Checking this condition exactly is
+	// expensive, so instead we consider the planar triangle ABX (which passes
+	// through the sphere's interior). The planar angles XAB and XBA are always
+	// less than the corresponding spherical angles, so if we are in the
+	// interior case then both of these angles must be acute.
+	//
+	// We check this by computing the squared edge lengths of the planar
+	// triangle ABX, and testing whether angles XAB and XBA are both acute using
+	// the law of cosines:
+	//
+	//	| XA^2 - XB^2 | < AB^2   (*)
+	//
+	// This test must be done conservatively (taking numerical errors into
+	// account) since otherwise we might miss a situation where the true minimum
+	// distance is achieved by a point on the edge interior.
+	//
+	// There are two sources of error in the expression above (*). The first is
+	// that points are not normalized exactly; they are only guaranteed to be
+	// within 2 * dblEpsilon of unit length. Under the assumption that the two
+	// sides of (*) are nearly equal, the total error due to normalization errors
+	// can be shown to be at most
+	//
+	//	2 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
+	//
+	// The other source of error is rounding of results in the calculation of (*).
+	// Each of XA^2, XB^2, AB^2 has a maximum relative error of 2.5 * dblEpsilon,
+	// plus an additional relative error of 0.5 * dblEpsilon in the final
+	// subtraction which we further bound as 0.25 * dblEpsilon * (XA^2 + XB^2 +
+	// AB^2) for convenience. This yields a final error bound of
+	//
+	//	4.75 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
+	ab2 := a.Sub(b.Vector).Norm2()
+	maxError := (4.75*dblEpsilon*(xa2+xb2+ab2) + 8*dblEpsilon*dblEpsilon)
+	if math.Abs(xa2-xb2) >= ab2+maxError {
+		return minDist, false
+	}
+
+	// The minimum distance might be to a point on the edge interior. Let R
+	// be the closest point to X that lies on the great circle through AB. Rather
+	// than computing the geodesic distance along the surface of the sphere,
+	// instead we compute the "chord length" through the sphere's interior.
+	//
+	// The squared chord length XR^2 can be expressed as XQ^2 + QR^2, where Q
+	// is the point X projected onto the plane through the great circle AB.
+	// The distance XQ^2 can be written as (X.C)^2 / |C|^2 where C = A x B.
+	// We ignore the QR^2 term and instead use XQ^2 as a lower bound, since it
+	// is faster and the corresponding distance on the Earth's surface is
+	// accurate to within 1% for distances up to about 1800km.
+	c := a.PointCross(b)
+	c2 := c.Norm2()
+	xDotC := x.Dot(c.Vector)
+	xDotC2 := xDotC * xDotC
+	if !alwaysUpdate && xDotC2 > c2*float64(minDist) {
+		// The closest point on the great circle AB is too far away. We need to
+		// test this using ">" rather than ">=" because the actual minimum bound
+		// on the distance is (xDotC2 / c2), which can be rounded differently
+		// than the (more efficient) multiplicative test above.
+		return minDist, false
+	}
+
+	// Otherwise we do the exact, more expensive test for the interior case.
+	// This test is very likely to succeed because of the conservative planar
+	// test we did initially.
+	//
+	// TODO(roberts): Ensure that the errors in test are accurately reflected in the
+	// minUpdateInteriorDistanceMaxError.
+	cx := c.Cross(x.Vector)
+	if a.Sub(x.Vector).Dot(cx) >= 0 || b.Sub(x.Vector).Dot(cx) <= 0 {
+		return minDist, false
+	}
+
+	// Compute the squared chord length XR^2 = XQ^2 + QR^2 (see above).
+	// This calculation has good accuracy for all chord lengths since it
+	// is based on both the dot product and cross product (rather than
+	// deriving one from the other). However, note that the chord length
+	// representation itself loses accuracy as the angle approaches π.
+	qr := 1 - math.Sqrt(cx.Norm2()/c2)
+	dist := s1.ChordAngle((xDotC2 / c2) + (qr * qr))
+
+	if !alwaysUpdate && dist >= minDist {
+		return minDist, false
+	}
+
+	return dist, true
+}
+
+// updateEdgePairMinDistance computes the minimum distance between the given
+// pair of edges. If the two edges cross, the distance is zero. The cases
+// a0 == a1 and b0 == b1 are handled correctly.
+func updateEdgePairMinDistance(a0, a1, b0, b1 Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+	if minDist == 0 {
+		return 0, false
+	}
+	if CrossingSign(a0, a1, b0, b1) == Cross {
+		minDist = 0
+		return 0, true
+	}
+
+	// Otherwise, the minimum distance is achieved at an endpoint of at least
+	// one of the two edges. We ensure that all four possibilities are always checked.
+	//
+	// The calculation below computes each of the six vertex-vertex distances
+	// twice (this could be optimized).
+	var ok1, ok2, ok3, ok4 bool
+	minDist, ok1 = UpdateMinDistance(a0, b0, b1, minDist)
+	minDist, ok2 = UpdateMinDistance(a1, b0, b1, minDist)
+	minDist, ok3 = UpdateMinDistance(b0, a0, a1, minDist)
+	minDist, ok4 = UpdateMinDistance(b1, a0, a1, minDist)
+	return minDist, ok1 || ok2 || ok3 || ok4
+}
+
+// updateEdgePairMaxDistance reports the maximum distance between the given pair of edges.
+// If one edge crosses the antipodal reflection of the other, the distance is pi.
+func updateEdgePairMaxDistance(a0, a1, b0, b1 Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) {
+	if maxDist == s1.StraightChordAngle {
+		return s1.StraightChordAngle, false
+	}
+	if CrossingSign(a0, a1, Point{b0.Mul(-1)}, Point{b1.Mul(-1)}) == Cross {
+		return s1.StraightChordAngle, true
+	}
+
+	// Otherwise, the maximum distance is achieved at an endpoint of at least
+	// one of the two edges. We ensure that all four possibilities are always checked.
+	//
+	// The calculation below computes each of the six vertex-vertex distances
+	// twice (this could be optimized).
+	var ok1, ok2, ok3, ok4 bool
+	maxDist, ok1 = UpdateMaxDistance(a0, b0, b1, maxDist)
+	maxDist, ok2 = UpdateMaxDistance(a1, b0, b1, maxDist)
+	maxDist, ok3 = UpdateMaxDistance(b0, a0, a1, maxDist)
+	maxDist, ok4 = UpdateMaxDistance(b1, a0, a1, maxDist)
+	return maxDist, ok1 || ok2 || ok3 || ok4
+}
+
+// EdgePairClosestPoints returns the pair of points (a, b) that achieves the
+// minimum distance between edges a0a1 and b0b1, where a is a point on a0a1 and
+// b is a point on b0b1. If the two edges intersect, a and b are both equal to
+// the intersection point. Handles a0 == a1 and b0 == b1 correctly.
+func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) {
+	if CrossingSign(a0, a1, b0, b1) == Cross {
+		x := Intersection(a0, a1, b0, b1)
+		return x, x
+	}
+	// We save some work by first determining which vertex/edge pair achieves
+	// the minimum distance, and then computing the closest point on that edge.
+	var minDist s1.ChordAngle
+	var ok bool
+
+	minDist, ok = updateMinDistance(a0, b0, b1, minDist, true)
+	closestVertex := 0
+	if minDist, ok = UpdateMinDistance(a1, b0, b1, minDist); ok {
+		closestVertex = 1
+	}
+	if minDist, ok = UpdateMinDistance(b0, a0, a1, minDist); ok {
+		closestVertex = 2
+	}
+	if minDist, ok = UpdateMinDistance(b1, a0, a1, minDist); ok {
+		closestVertex = 3
+	}
+	switch closestVertex {
+	case 0:
+		return a0, Project(a0, b0, b1)
+	case 1:
+		return a1, Project(a1, b0, b1)
+	case 2:
+		return Project(b0, a0, a1), b0
+	case 3:
+		return Project(b1, a0, a1), b1
+	default:
+		panic("illegal case reached")
+	}
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/edge_query.go b/vendor/github.com/blevesearch/geo/s2/edge_query.go
new file mode 100644
index 0000000..6c86962
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/edge_query.go
@@ -0,0 +1,816 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"sort"
+
+	"github.com/golang/geo/s1"
+)
+
+// EdgeQueryOptions holds the options for controlling how EdgeQuery operates.
+//
+// Options can be chained together builder-style:
+//
+//	opts = NewClosestEdgeQueryOptions().
+//		MaxResults(1).
+//		DistanceLimit(s1.ChordAngleFromAngle(3 * s1.Degree)).
+//		MaxError(s1.ChordAngleFromAngle(0.001 * s1.Degree))
+//	query = NewClosestEdgeQuery(index, opts)
+//
+// or set individually:
+//
+//	opts = NewClosestEdgeQueryOptions()
+//	opts.IncludeInteriors(true)
+//
+// or just inline:
+//
+//	query = NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions().MaxResults(3))
+//
+// If you pass a nil as the options you get the default values for the options.
+type EdgeQueryOptions struct {
+	common *queryOptions
+}
+
+// DistanceLimit specifies that only edges whose distance to the target is
+// within this distance should be returned. Edges whose distance is exactly
+// equal to the limit are not returned. To include values that are equal,
+// specify the limit with the next largest representable distance, i.e.
+// limit.Successor().
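+//
+// For example (an illustrative sketch; the roughly 5 km limit assumes a
+// mean Earth radius of 6371 km and is a hypothetical value):
+//
+//	limit := s1.ChordAngleFromAngle(s1.Angle(5000.0 / 6371000.0))
+//	opts := NewClosestEdgeQueryOptions().DistanceLimit(limit.Successor())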
+func (e *EdgeQueryOptions) DistanceLimit(limit s1.ChordAngle) *EdgeQueryOptions {
+	e.common = e.common.DistanceLimit(limit)
+	return e
+}
+
+// IncludeInteriors specifies whether polygon interiors should be
+// included when measuring distances.
+func (e *EdgeQueryOptions) IncludeInteriors(x bool) *EdgeQueryOptions {
+	e.common = e.common.IncludeInteriors(x)
+	return e
+}
+
+// UseBruteForce enables or disables the use of brute force in a query.
+func (e *EdgeQueryOptions) UseBruteForce(x bool) *EdgeQueryOptions {
+	e.common = e.common.UseBruteForce(x)
+	return e
+}
+
+// MaxError specifies that edges up to dist further away than the true
+// matching edges may be substituted in the result set, as long as such
+// edges satisfy all the remaining search criteria (such as DistanceLimit).
+// This option only has an effect if MaxResults is also specified;
+// otherwise all edges closer than the DistanceLimit will always be returned.
+func (e *EdgeQueryOptions) MaxError(dist s1.ChordAngle) *EdgeQueryOptions {
+	e.common = e.common.MaxError(dist)
+	return e
+}
+
+// MaxResults specifies that at most MaxResults edges should be returned.
+// This must be at least 1.
+func (e *EdgeQueryOptions) MaxResults(n int) *EdgeQueryOptions {
+	e.common = e.common.MaxResults(n)
+	return e
+}
+
+// NewClosestEdgeQueryOptions returns a set of edge query options suitable
+// for performing closest edge queries.
+func NewClosestEdgeQueryOptions() *EdgeQueryOptions {
+	return &EdgeQueryOptions{
+		common: newQueryOptions(minDistance(0)),
+	}
+}
+
+// NewFurthestEdgeQueryOptions returns a set of edge query options suitable
+// for performing furthest edge queries.
+func NewFurthestEdgeQueryOptions() *EdgeQueryOptions {
+	return &EdgeQueryOptions{
+		common: newQueryOptions(maxDistance(0)),
+	}
+}
+
+// EdgeQueryResult represents an edge that meets the target criteria for the
+// query. Note the following special cases:
+//
+// - ShapeID >= 0 && EdgeID < 0 represents the interior of a shape.
+//   Such results may be returned when the option IncludeInteriors is true.
+//
+// - ShapeID < 0 && EdgeID < 0 is returned to indicate that no edge
+//   satisfies the requested query options.
+type EdgeQueryResult struct {
+	distance distance
+	shapeID  int32
+	edgeID   int32
+}
+
+// Distance reports the distance to the edge in this shape that satisfied
+// the query's parameters.
+func (e EdgeQueryResult) Distance() s1.ChordAngle { return e.distance.chordAngle() }
+
+// ShapeID reports the ID of the Shape this result is for.
+func (e EdgeQueryResult) ShapeID() int32 { return e.shapeID }
+
+// EdgeID reports the ID of the edge in the result's Shape.
+func (e EdgeQueryResult) EdgeID() int32 { return e.edgeID }
+
+// newEdgeQueryResult returns a result instance with default values.
+func newEdgeQueryResult(target distanceTarget) EdgeQueryResult {
+	return EdgeQueryResult{
+		distance: target.distance().infinity(),
+		shapeID:  -1,
+		edgeID:   -1,
+	}
+}
+
+// IsInterior reports if this result represents the interior of a Shape.
+func (e EdgeQueryResult) IsInterior() bool {
+	return e.shapeID >= 0 && e.edgeID < 0
+}
+
+// IsEmpty reports if this has no edge that satisfies the given edge query options.
+// This result is only returned in one special case, namely when FindEdge() does
+// not find any suitable edges.
+func (e EdgeQueryResult) IsEmpty() bool {
+	return e.shapeID < 0
+}
+
+// Less reports whether this result is less than the other, first by distance,
+// then by (shapeID, edgeID). This is used for sorting.
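+//
+// For example (an illustrative sketch; results is a hypothetical slice of
+// EdgeQueryResult values):
+//
+//	sort.Slice(results, func(i, j int) bool { return results[i].Less(results[j]) })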
+func (e EdgeQueryResult) Less(other EdgeQueryResult) bool {
+ if e.distance.chordAngle() != other.distance.chordAngle() {
+ return e.distance.less(other.distance)
+ }
+ if e.shapeID != other.shapeID {
+ return e.shapeID < other.shapeID
+ }
+ return e.edgeID < other.edgeID
+}
+
+// EdgeQuery is used to find the edge(s) between two geometries that match a
+// given set of options. It is flexible enough so that it can be adapted to
+// compute maximum distances and even potentially Hausdorff distances.
+//
+// By using the appropriate options, this type can answer questions such as:
+//
+// - Find the minimum distance between two geometries A and B.
+// - Find all edges of geometry A that are within a distance D of geometry B.
+// - Find the k edges of geometry A that are closest to a given point P.
+//
+// You can also specify whether polygons should include their interiors (i.e.,
+// if a point is contained by a polygon, should the distance be zero or should
+// it be measured to the polygon boundary?)
+//
+// The input geometries may consist of any number of points, polylines, and
+// polygons (collectively referred to as "shapes"). Shapes do not need to be
+// disjoint; they may overlap or intersect arbitrarily. The implementation is
+// designed to be fast for both simple and complex geometries.
+type EdgeQuery struct {
+ index *ShapeIndex
+ opts *queryOptions
+ target distanceTarget
+
+ // True if opts.maxError must be subtracted from ShapeIndex cell distances
+ // in order to ensure that such distances are measured conservatively. This
+ // is true only if the target takes advantage of maxError in order to
+ // return faster results, and 0 < maxError < distanceLimit.
+ useConservativeCellDistance bool
+
+ // The decision about whether to use the brute force algorithm is based on
+ // counting the total number of edges in the index. However if the index
+ // contains a large number of shapes, this in itself might take too long.
+ // So instead we only count edges up to (maxBruteForceIndexSize() + 1)
+ // for the current target type (stored as indexNumEdgesLimit).
+ indexNumEdges int
+ indexNumEdgesLimit int
+
+ // The distance beyond which we can safely ignore further candidate edges.
+ // (Candidates that are exactly at the limit are ignored; this is more
+ // efficient for UpdateMinDistance and should not affect clients since
+ // distance measurements have a small amount of error anyway.)
+ //
+ // Initially this is the same as the maximum distance specified by the user,
+ // but it can also be updated by the algorithm (see maybeAddResult).
+ distanceLimit distance
+
+ // The current set of results of the query.
+ results []EdgeQueryResult
+
+ // This field is true when duplicates must be avoided explicitly. This
+ // is achieved by maintaining a separate set keyed by (shapeID, edgeID)
+ // only, and checking whether each edge is in that set before computing the
+ // distance to it.
+ avoidDuplicates bool
+
+ // testedEdges tracks the set of shapes and edges that have already been tested.
+ testedEdges map[ShapeEdgeID]uint32
+
+ // For the optimized algorithm we precompute the top-level CellIDs that
+ // will be added to the priority queue. There can be at most 6 of these
+ // cells. Essentially this is just a covering of the indexed edges, except
+ // that we also store pointers to the corresponding ShapeIndexCells to
+ // reduce the number of index seeks required.
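+ //
+ // (Added note: the bound of at most 6 cells comes from using at most one
+ // top-level cell per face of the S2 cube decomposition; see initCovering.)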
+ indexCovering []CellID
+ indexCells []*ShapeIndexCell
+
+ // The algorithm maintains a priority queue of unprocessed CellIDs, sorted
+ // in increasing order of distance from the target.
+ queue *queryQueue
+
+ iter *ShapeIndexIterator
+ maxDistanceCovering []CellID
+ initialCells []CellID
+}
+
+// NewClosestEdgeQuery returns an EdgeQuery that is used for finding the
+// closest edge(s) to a given Point, Edge, Cell, or geometry collection.
+//
+// You can find either the k closest edges, or all edges within a given
+// radius, or both (i.e., the k closest edges up to a given maximum radius).
+// E.g. to find all the edges within 5 kilometers, set the DistanceLimit in
+// the options.
+//
+// By default *all* edges are returned, so you should always specify either
+// MaxResults or DistanceLimit options or both.
+//
+// Note that by default, distances are measured to the boundary and interior
+// of polygons. For example, if a point is inside a polygon then its distance
+// is zero. To change this behavior, set the IncludeInteriors option to false.
+//
+// If you only need to test whether the distance is above or below a given
+// threshold (e.g., 10 km), you can use the IsDistanceLess() method. This is
+// much faster than actually calculating the distance with FindEdge,
+// since the implementation can stop as soon as it can prove that the minimum
+// distance is either above or below the threshold.
+func NewClosestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
+ if opts == nil {
+ opts = NewClosestEdgeQueryOptions()
+ }
+ e := &EdgeQuery{
+ testedEdges: make(map[ShapeEdgeID]uint32),
+ index: index,
+ opts: opts.common,
+ queue: newQueryQueue(),
+ }
+
+ return e
+}
+
+// NewFurthestEdgeQuery returns an EdgeQuery that is used for finding the
+// furthest edge(s) from a given Point, Edge, Cell, or geometry collection.
+//
+// The furthest edge is defined as the one which maximizes the
+// distance from any point on that edge to any point on the target geometry.
+//
+// Similar to the example in NewClosestEdgeQuery, to find the 5 furthest edges
+// from a given Point:
+//
+// query = NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions().MaxResults(5))
+func NewFurthestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
+ if opts == nil {
+ opts = NewFurthestEdgeQueryOptions()
+ }
+ e := &EdgeQuery{
+ testedEdges: make(map[ShapeEdgeID]uint32),
+ index: index,
+ opts: opts.common,
+ queue: newQueryQueue(),
+ }
+
+ return e
+}
+
+// Reset resets the state of this EdgeQuery.
+func (e *EdgeQuery) Reset() {
+ e.indexNumEdges = 0
+ e.indexNumEdgesLimit = 0
+ e.indexCovering = nil
+ e.indexCells = nil
+}
+
+// FindEdges returns the edges for the given target that satisfy the current options.
+//
+// Note that if opts.IncludeInteriors is true, the results may include some
+// entries with edgeID == -1. This indicates that the target intersects
+// the indexed polygon with the given ShapeID.
+func (e *EdgeQuery) FindEdges(target distanceTarget) []EdgeQueryResult {
+ return e.findEdges(target, e.opts)
+}
+
+// Distance reports the distance to the target. If the index or target is empty,
+// returns the EdgeQuery's maximal sentinel.
+//
+// Use IsDistanceLess()/IsDistanceGreater() if you only want to compare the
+// distance against a threshold value, since it is often much faster.
+func (e *EdgeQuery) Distance(target distanceTarget) s1.ChordAngle {
+ return e.findEdge(target, e.opts).Distance()
+}
+
+// IsDistanceLess reports if the distance to target is less than the given limit.
+//
+// This method is usually much faster than Distance(), since it is much
+// less work to determine whether the minimum distance is above or below a
+// threshold than it is to calculate the actual minimum distance.
+//
+// If you wish to check if the distance is less than or equal to the limit, use:
+//
+// query.IsDistanceLess(target, limit.Successor())
+//
+func (e *EdgeQuery) IsDistanceLess(target distanceTarget, limit s1.ChordAngle) bool {
+ opts := e.opts
+ opts = opts.MaxResults(1).
+ DistanceLimit(limit).
+ MaxError(s1.StraightChordAngle)
+ return !e.findEdge(target, opts).IsEmpty()
+}
+
+// IsDistanceGreater reports if the distance to target is greater than limit.
+//
+// This method is usually much faster than Distance, since it is much
+// less work to determine whether the maximum distance is above or below a
+// threshold than it is to calculate the actual maximum distance.
+// If you wish to check if the distance is greater than or equal to the limit, use:
+//
+// query.IsDistanceGreater(target, limit.Predecessor())
+//
+func (e *EdgeQuery) IsDistanceGreater(target distanceTarget, limit s1.ChordAngle) bool {
+ return e.IsDistanceLess(target, limit)
+}
+
+// IsConservativeDistanceLessOrEqual reports if the distance to target is less
+// than or equal to the limit, where the limit has been expanded by the maximum
+// error for the distance calculation.
+//
+// For example, suppose that we want to test whether two geometries might
+// intersect each other after they are snapped together using Builder
+// (using the IdentitySnapFunction with a given "snap radius"). Since
+// Builder uses exact distance predicates (s2predicates), we need to
+// measure the distance between the two geometries conservatively. If the
+// distance is definitely greater than "snap radius", then the geometries
+// are guaranteed to not intersect after snapping.
+func (e *EdgeQuery) IsConservativeDistanceLessOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
+ return e.IsDistanceLess(target, limit.Expanded(minUpdateDistanceMaxError(limit)))
+}
+
+// IsConservativeDistanceGreaterOrEqual reports if the distance to the target is greater
+// than or equal to the given limit with some small tolerance.
+func (e *EdgeQuery) IsConservativeDistanceGreaterOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
+ return e.IsDistanceGreater(target, limit.Expanded(-minUpdateDistanceMaxError(limit)))
+}
+
+// findEdges returns the closest edges to the given target that satisfy the given options.
+//
+// Note that if opts.includeInteriors is true, the results may include some
+// entries with edgeID == -1. This indicates that the target intersects the
+// indexed polygon with the given shapeID.
+func (e *EdgeQuery) findEdges(target distanceTarget, opts *queryOptions) []EdgeQueryResult {
+ e.findEdgesInternal(target, opts)
+ // TODO(roberts): Revisit this if there is a heap or other sorted and
+ // uniquing data structure we can use instead of just a slice.
+ e.results = sortAndUniqueResults(e.results)
+ if len(e.results) > e.opts.maxResults {
+ e.results = e.results[:e.opts.maxResults]
+ }
+ return e.results
+}
+
+func sortAndUniqueResults(results []EdgeQueryResult) []EdgeQueryResult {
+ if len(results) <= 1 {
+ return results
+ }
+ sort.Slice(results, func(i, j int) bool { return results[i].Less(results[j]) })
+ j := 0
+ for i := 1; i < len(results); i++ {
+ if results[j] == results[i] {
+ continue
+ }
+ j++
+ results[j] = results[i]
+ }
+ return results[:j+1]
+}
+
+// findEdge is a convenience method that returns exactly one edge, and if no
+// edges satisfy the given search criteria, then a default Result is returned.
+//
+// This is primarily to ease the usage of a number of the methods on the
+// distance targets and in EdgeQuery.
+func (e *EdgeQuery) findEdge(target distanceTarget, opts *queryOptions) EdgeQueryResult {
+ opts.MaxResults(1)
+ e.findEdges(target, opts)
+ if len(e.results) > 0 {
+ return e.results[0]
+ }
+
+ return newEdgeQueryResult(target)
+}
+
+// findEdgesInternal does the actual work of finding the edges that match the
+// given options.
+func (e *EdgeQuery) findEdgesInternal(target distanceTarget, opts *queryOptions) {
+ e.target = target
+ e.opts = opts
+
+ e.testedEdges = make(map[ShapeEdgeID]uint32)
+ e.distanceLimit = target.distance().fromChordAngle(opts.distanceLimit)
+ e.results = make([]EdgeQueryResult, 0)
+
+ if e.distanceLimit == target.distance().zero() {
+ return
+ }
+
+ if opts.includeInteriors {
+ shapeIDs := map[int32]struct{}{}
+ e.target.visitContainingShapes(e.index, func(containingShape Shape, targetPoint Point) bool {
+ shapeIDs[e.index.idForShape(containingShape)] = struct{}{}
+ return len(shapeIDs) < opts.maxResults
+ })
+ for shapeID := range shapeIDs {
+ e.addResult(EdgeQueryResult{target.distance().zero(), shapeID, -1})
+ }
+
+ if e.distanceLimit == target.distance().zero() {
+ return
+ }
+ }
+
+ // If maxError > 0 and the target takes advantage of this, then we may
+ // need to adjust the distance estimates to ShapeIndex cells to ensure
+ // that they are always a lower bound on the true distance. For example,
+ // suppose distanceLimit == 100, maxError == 30, and we compute the distance
+ // to the target from some cell C0 as d(C0) == 80. Then because the target
+ // takes advantage of maxError, the true distance could be as low as 50.
+ // In order not to miss edges contained by such cells, we need to subtract
+ // maxError from the distance estimates. This behavior is controlled by
+ // the useConservativeCellDistance flag.
+ //
+ // However there is one important case where this adjustment is not
+ // necessary, namely when distanceLimit < maxError. This is because
+ // maxError only affects the algorithm once at least maxResults edges
+ // have been found that satisfy the given distance limit. At that point,
+ // maxError is subtracted from distanceLimit in order to ensure that
+ // any further matches are closer by at least that amount. But when
+ // distanceLimit < maxError, this reduces the distance limit to 0,
+ // i.e. all remaining candidate cells and edges can safely be discarded.
+ // (This is how IsDistanceLess() and friends are implemented.)
+ targetUsesMaxError := opts.maxError != target.distance().zero().chordAngle() &&
+ e.target.setMaxError(opts.maxError)
+
+ // Note that we can't compare maxError and distanceLimit directly
+ // because one is a Delta and one is a Distance. Instead we subtract them.
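+ // (Worked example, added for clarity: continuing the numbers above, with
+ // distanceLimit == 100 and maxError == 30 the subtraction below leaves 70,
+ // which is still positive, so cell distances must be measured
+ // conservatively and useConservativeCellDistance becomes true.)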
+ e.useConservativeCellDistance = targetUsesMaxError && + (e.distanceLimit == target.distance().infinity() || + target.distance().zero().less(e.distanceLimit.sub(target.distance().fromChordAngle(opts.maxError)))) + + // Use the brute force algorithm if the index is small enough. To avoid + // spending too much time counting edges when there are many shapes, we stop + // counting once there are too many edges. We may need to recount the edges + // if we later see a target with a larger brute force edge threshold. + minOptimizedEdges := e.target.maxBruteForceIndexSize() + 1 + if minOptimizedEdges > e.indexNumEdgesLimit && e.indexNumEdges >= e.indexNumEdgesLimit { + e.indexNumEdges = e.index.NumEdgesUpTo(minOptimizedEdges) + e.indexNumEdgesLimit = minOptimizedEdges + } + + if opts.useBruteForce || e.indexNumEdges < minOptimizedEdges { + // The brute force algorithm already considers each edge exactly once. + e.avoidDuplicates = false + e.findEdgesBruteForce() + } else { + // If the target takes advantage of maxError then we need to avoid + // duplicate edges explicitly. (Otherwise it happens automatically.) + e.avoidDuplicates = targetUsesMaxError && opts.maxResults > 1 + e.findEdgesOptimized() + } +} + +func (e *EdgeQuery) addResult(r EdgeQueryResult) { + e.results = append(e.results, r) + if e.opts.maxResults == 1 { + // Optimization for the common case where only the closest edge is wanted. + e.distanceLimit = r.distance.sub(e.target.distance().fromChordAngle(e.opts.maxError)) + } + // TODO(roberts): Add the other if/else cases when a different data structure + // is used for the results. +} + +func (e *EdgeQuery) maybeAddResult(shape Shape, edgeID int32) { + if _, ok := e.testedEdges[ShapeEdgeID{e.index.idForShape(shape), edgeID}]; e.avoidDuplicates && !ok { + return + } + edge := shape.Edge(int(edgeID)) + dist := e.distanceLimit + + if dist, ok := e.target.updateDistanceToEdge(edge, dist); ok { + e.addResult(EdgeQueryResult{dist, e.index.idForShape(shape), edgeID}) + } +} + +func (e *EdgeQuery) findEdgesBruteForce() { + // Range over all shapes in the index. Does order matter here? if so + // switch to for i = 0 .. n? + for _, shape := range e.index.shapes { + // TODO(roberts): can this happen if we are only ranging over current entries? + if shape == nil { + continue + } + for edgeID := int32(0); edgeID < int32(shape.NumEdges()); edgeID++ { + e.maybeAddResult(shape, edgeID) + } + } +} + +func (e *EdgeQuery) findEdgesOptimized() { + e.initQueue() + // Repeatedly find the closest Cell to "target" and either split it into + // its four children or process all of its edges. + for e.queue.size() > 0 { + // We need to copy the top entry before removing it, and we need to + // remove it before adding any new entries to the queue. + entry := e.queue.pop() + + if !entry.distance.less(e.distanceLimit) { + e.queue.reset() // Clear any remaining entries. + break + } + // If this is already known to be an index cell, just process it. + if entry.indexCell != nil { + e.processEdges(entry) + continue + } + // Otherwise split the cell into its four children. Before adding a + // child back to the queue, we first check whether it is empty. We do + // this in two seek operations rather than four by seeking to the key + // between children 0 and 1 and to the key between children 2 and 3. 
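+ // (Clarifying note: after the first seek, the iterator's current cell
+ // tells us whether child 1 is non-empty, and stepping backwards tells us
+ // whether child 0 is; the second seek handles children 2 and 3 the same
+ // way.)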
+ id := entry.id
+ ch := id.Children()
+ e.iter.seek(ch[1].RangeMin())
+
+ if !e.iter.Done() && e.iter.CellID() <= ch[1].RangeMax() {
+ e.processOrEnqueueCell(ch[1])
+ }
+ if e.iter.Prev() && e.iter.CellID() >= id.RangeMin() {
+ e.processOrEnqueueCell(ch[0])
+ }
+
+ e.iter.seek(ch[3].RangeMin())
+ if !e.iter.Done() && e.iter.CellID() <= id.RangeMax() {
+ e.processOrEnqueueCell(ch[3])
+ }
+ if e.iter.Prev() && e.iter.CellID() >= ch[2].RangeMin() {
+ e.processOrEnqueueCell(ch[2])
+ }
+ }
+}
+
+func (e *EdgeQuery) processOrEnqueueCell(id CellID) {
+ if e.iter.CellID() == id {
+ e.processOrEnqueue(id, e.iter.IndexCell())
+ } else {
+ e.processOrEnqueue(id, nil)
+ }
+}
+
+func (e *EdgeQuery) initQueue() {
+ if len(e.indexCovering) == 0 {
+ // We delay iterator initialization until now to make queries on very
+ // small indexes a bit faster (i.e., where brute force is used).
+ e.iter = NewShapeIndexIterator(e.index)
+ }
+
+ // Optimization: if the user is searching for just the closest edge, and the
+ // center of the target's bounding cap happens to intersect an index cell,
+ // then we try to limit the search region to a small disc by first
+ // processing the edges in that cell. This sets distanceLimit based on
+ // the closest edge in that cell, which we can then use to limit the search
+ // area. This means that the cell containing "target" will be processed
+ // twice, but in general this is still faster.
+ //
+ // TODO(roberts): Even if the cap center is not contained, we could still
+ // process one or both of the adjacent index cells in CellID order,
+ // provided that those cells are closer than distanceLimit.
+ cb := e.target.capBound()
+ if cb.IsEmpty() {
+ return // Empty target.
+ }
+
+ if e.opts.maxResults == 1 && e.iter.LocatePoint(cb.Center()) {
+ e.processEdges(&queryQueueEntry{
+ distance: e.target.distance().zero(),
+ id: e.iter.CellID(),
+ indexCell: e.iter.IndexCell(),
+ })
+ // Skip the rest of the algorithm if we found an intersecting edge.
+ if e.distanceLimit == e.target.distance().zero() {
+ return
+ }
+ }
+ if len(e.indexCovering) == 0 {
+ e.initCovering()
+ }
+ if e.distanceLimit == e.target.distance().infinity() {
+ // Start with the precomputed index covering.
+ for i := range e.indexCovering {
+ e.processOrEnqueue(e.indexCovering[i], e.indexCells[i])
+ }
+ } else {
+ // Compute a covering of the search disc and intersect it with the
+ // precomputed index covering.
+ coverer := &RegionCoverer{MaxCells: 4, LevelMod: 1, MaxLevel: maxLevel}
+
+ radius := cb.Radius() + e.distanceLimit.chordAngleBound().Angle()
+ searchCB := CapFromCenterAngle(cb.Center(), radius)
+ maxDistCover := coverer.FastCovering(searchCB)
+ e.initialCells = CellUnionFromIntersection(e.indexCovering, maxDistCover)
+
+ // Now we need to clean up the initial cells to ensure that they all
+ // contain at least one cell of the ShapeIndex. (Some may not intersect
+ // the index at all, while others may be descendants of an index cell.)
+ i, j := 0, 0
+ for i < len(e.initialCells) {
+ idI := e.initialCells[i]
+ // Find the top-level cell that contains this initial cell.
+ for e.indexCovering[j].RangeMax() < idI {
+ j++
+ }
+
+ idJ := e.indexCovering[j]
+ if idI == idJ {
+ // This initial cell is one of the top-level cells. Use the
+ // precomputed ShapeIndexCell pointer to avoid an index seek.
+ e.processOrEnqueue(idJ, e.indexCells[j])
+ i++
+ j++
+ } else {
+ // This initial cell is a proper descendant of a top-level cell.
+ // Check how it is related to the cells of the ShapeIndex.
+ r := e.iter.LocateCellID(idI)
+ if r == Indexed {
+ // This cell is a descendant of an index cell.
+ // Enqueue it and skip any other initial cells
+ // that are also descendants of this cell.
+ e.processOrEnqueue(e.iter.CellID(), e.iter.IndexCell())
+ lastID := e.iter.CellID().RangeMax()
+ for i < len(e.initialCells) && e.initialCells[i] <= lastID {
+ i++
+ }
+ } else {
+ // Enqueue the cell only if it contains at least one index cell.
+ if r == Subdivided {
+ e.processOrEnqueue(idI, nil)
+ }
+ i++
+ }
+ }
+ }
+}
+
+func (e *EdgeQuery) initCovering() {
+ // Find the range of Cells spanned by the index and choose a level such
+ // that the entire index can be covered with just a few cells. These are
+ // the "top-level" cells. There are two cases:
+ //
+ // - If the index spans more than one face, then there is one top-level cell
+ // per spanned face, just big enough to cover the index cells on that face.
+ //
+ // - If the index spans only one face, then we find the smallest cell "C"
+ // that covers the index cells on that face (just like the case above).
+ // Then for each of the 4 children of "C", if the child contains any index
+ // cells then we create a top-level cell that is big enough to just fit
+ // those index cells (i.e., shrinking the child as much as possible to fit
+ // its contents). This essentially replicates what would happen if we
+ // started with "C" as the top-level cell, since "C" would immediately be
+ // split, except that we take the time to prune the children further since
+ // this will save work on every subsequent query.
+ e.indexCovering = make([]CellID, 0, 6)
+
+ // TODO(roberts): Use a single iterator below and save position
+ // information using pair {CellID, ShapeIndexCell}.
+ next := NewShapeIndexIterator(e.index, IteratorBegin)
+ last := NewShapeIndexIterator(e.index, IteratorEnd)
+ last.Prev()
+ if next.CellID() != last.CellID() {
+ // The index has at least two cells. Choose a level such that the entire
+ // index can be spanned with at most 6 cells (if the index spans multiple
+ // faces) or 4 cells (if the index spans a single face).
+ level, ok := next.CellID().CommonAncestorLevel(last.CellID())
+ if !ok {
+ level = 0
+ } else {
+ level++
+ }
+
+ // Visit each potential top-level cell except the last (handled below).
+ lastID := last.CellID().Parent(level)
+ for id := next.CellID().Parent(level); id != lastID; id = id.Next() {
+ // Skip any top-level cells that don't contain any index cells.
+ if id.RangeMax() < next.CellID() {
+ continue
+ }
+
+ // Find the range of index cells contained by this top-level cell and
+ // then shrink the cell if necessary so that it just covers them.
+ cellFirst := next.clone()
+ next.seek(id.RangeMax().Next())
+ cellLast := next.clone()
+ cellLast.Prev()
+ e.addInitialRange(cellFirst, cellLast)
+ break
+ }
+
+ }
+ e.addInitialRange(next, last)
+}
+
+// addInitialRange adds an entry to the indexCovering and indexCells that covers the given
+// inclusive range of cells.
+//
+// This requires that first and last cells have a common ancestor.
+func (e *EdgeQuery) addInitialRange(first, last *ShapeIndexIterator) {
+ if first.CellID() == last.CellID() {
+ // The range consists of a single index cell.
+ e.indexCovering = append(e.indexCovering, first.CellID())
+ e.indexCells = append(e.indexCells, first.IndexCell())
+ } else {
+ // Add the lowest common ancestor of the given range.
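+ // (Added note: the Parent call below lifts first's CellID to the level
+ // of the lowest common ancestor; the matching indexCells entry is left
+ // nil, marking a cell that will be subdivided during the search rather
+ // than processed directly.)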
+ level, _ := first.CellID().CommonAncestorLevel(last.CellID())
+ e.indexCovering = append(e.indexCovering, first.CellID().Parent(level))
+ e.indexCells = append(e.indexCells, nil)
+ }
+}
+
+// processEdges processes all the edges of the given index cell.
+func (e *EdgeQuery) processEdges(entry *queryQueueEntry) {
+ for _, clipped := range entry.indexCell.shapes {
+ shape := e.index.Shape(clipped.shapeID)
+ for j := 0; j < clipped.numEdges(); j++ {
+ e.maybeAddResult(shape, int32(clipped.edges[j]))
+ }
+ }
+}
+
+// processOrEnqueue processes the edges of the given cell directly if it has
+// only a few of them, and otherwise enqueues the cell by its distance to the
+// target.
+func (e *EdgeQuery) processOrEnqueue(id CellID, indexCell *ShapeIndexCell) {
+ if indexCell != nil {
+ // If this index cell has only a few edges, then it is faster to check
+ // them directly rather than computing the minimum distance to the Cell
+ // and inserting it into the queue.
+ const minEdgesToEnqueue = 10
+ numEdges := indexCell.numEdges()
+ if numEdges == 0 {
+ return
+ }
+ if numEdges < minEdgesToEnqueue {
+ // Set "distance" to zero to avoid the expense of computing it.
+ e.processEdges(&queryQueueEntry{
+ distance: e.target.distance().zero(),
+ id: id,
+ indexCell: indexCell,
+ })
+ return
+ }
+ }
+
+ // Otherwise compute the minimum distance to any point in the cell and add
+ // it to the priority queue.
+ cell := CellFromCellID(id)
+ dist := e.distanceLimit
+ var ok bool
+ if dist, ok = e.target.updateDistanceToCell(cell, dist); !ok {
+ return
+ }
+ if e.useConservativeCellDistance {
+ // Ensure that "distance" is a lower bound on the true distance to the cell.
+ dist = dist.sub(e.target.distance().fromChordAngle(e.opts.maxError))
+ }
+
+ e.queue.push(&queryQueueEntry{
+ distance: dist,
+ id: id,
+ indexCell: indexCell,
+ })
+}
+
+// GetEdge returns the edge identified by the given result.
+func (e *EdgeQuery) GetEdge(result EdgeQueryResult) Edge {
+ return e.index.Shape(result.shapeID).Edge(int(result.edgeID))
+}
+
+// Project returns the point on the edge identified by the given result that
+// is closest to the given point. If the result represents a shape interior,
+// the point itself is returned.
+func (e *EdgeQuery) Project(point Point, result EdgeQueryResult) Point {
+ if result.edgeID < 0 {
+ return point
+ }
+
+ edge := e.GetEdge(result)
+ return Project(point, edge.V0, edge.V1)
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/edge_tessellator.go b/vendor/github.com/blevesearch/geo/s2/edge_tessellator.go
new file mode 100644
index 0000000..1d5805c
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/edge_tessellator.go
@@ -0,0 +1,291 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "github.com/golang/geo/r2"
+ "github.com/golang/geo/s1"
+)
+
+// Tessellation is implemented by subdividing the edge until the estimated
+// maximum error is below the given tolerance. Estimating error is a hard
+// problem, especially when the only methods available are point evaluation of
+// the projection and its inverse. (These are the only methods that
+// Projection provides, which makes it easier and less error-prone to
+// implement new projections.)
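+//
+// (Added note: "point evaluation" here means the only operations available
+// are computing p(x) for a point x on the sphere and p^-1(y) for a point y
+// in the plane; there is no closed-form error expression for an edge as a
+// whole, so the error must be estimated by sampling.)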
+// +// One technique that significantly increases robustness is to treat the +// geodesic and projected edges as parametric curves rather than geometric ones. +// Given a spherical edge AB and a projection p:S2->R2, let f(t) be the +// normalized arc length parametrization of AB and let g(t) be the normalized +// arc length parameterization of the projected edge p(A)p(B). (In other words, +// f(0)=A, f(1)=B, g(0)=p(A), g(1)=p(B).) We now define the geometric error as +// the maximum distance from the point p^-1(g(t)) to the geodesic edge AB for +// any t in [0,1], where p^-1 denotes the inverse projection. In other words, +// the geometric error is the maximum distance from any point on the projected +// edge (mapped back onto the sphere) to the geodesic edge AB. On the other +// hand we define the parametric error as the maximum distance between the +// points f(t) and p^-1(g(t)) for any t in [0,1], i.e. the maximum distance +// (measured on the sphere) between the geodesic and projected points at the +// same interpolation fraction t. +// +// The easiest way to estimate the parametric error is to simply evaluate both +// edges at their midpoints and measure the distance between them (the "midpoint +// method"). This is very fast and works quite well for most edges, however it +// has one major drawback: it doesn't handle points of inflection (i.e., points +// where the curvature changes sign). For example, edges in the Mercator and +// Plate Carree projections always curve towards the equator relative to the +// corresponding geodesic edge, so in these projections there is a point of +// inflection whenever the projected edge crosses the equator. The worst case +// occurs when the edge endpoints have different longitudes but the same +// absolute latitude, since in that case the error is non-zero but the edges +// have exactly the same midpoint (on the equator). +// +// One solution to this problem is to split the input edges at all inflection +// points (i.e., along the equator in the case of the Mercator and Plate Carree +// projections). However for general projections these inflection points can +// occur anywhere on the sphere (e.g., consider the Transverse Mercator +// projection). This could be addressed by adding methods to the S2Projection +// interface to split edges at inflection points but this would make it harder +// and more error-prone to implement new projections. +// +// Another problem with this approach is that the midpoint method sometimes +// underestimates the true error even when edges do not cross the equator. +// For the Plate Carree and Mercator projections, the midpoint method can +// underestimate the error by up to 3%. +// +// Both of these problems can be solved as follows. We assume that the error +// can be modeled as a convex combination of two worst-case functions, one +// where the error is maximized at the edge midpoint and another where the +// error is *minimized* (i.e., zero) at the edge midpoint. For example, we +// could choose these functions as: +// +// E1(x) = 1 - x^2 +// E2(x) = x * (1 - x^2) +// +// where for convenience we use an interpolation parameter "x" in the range +// [-1, 1] rather than the original "t" in the range [0, 1]. Note that both +// error functions must have roots at x = {-1, 1} since the error must be zero +// at the edge endpoints. 
E1 is simply a parabola whose maximum value is 1
+// attained at x = 0, while E2 is a cubic with an additional root at x = 0,
+// and whose maximum value is 2 * sqrt(3) / 9 attained at x = 1 / sqrt(3).
+//
+// Next, it is convenient to scale these functions so that they both have a
+// maximum value of 1. E1 already satisfies this requirement, and we simply
+// redefine E2 as
+//
+// E2(x) = x * (1 - x^2) / (2 * sqrt(3) / 9)
+//
+// Now define x0 to be the point where these two functions intersect, i.e. the
+// point in the range (-1, 1) where E1(x0) = E2(x0). This value has the very
+// convenient property that if we evaluate the actual error E(x0), then the
+// maximum error on the entire interval [-1, 1] is bounded by
+//
+// E(x) <= E(x0) / E1(x0)
+//
+// since whether the error is modeled using E1 or E2, the resulting function
+// has the same maximum value (namely E(x0) / E1(x0)). If it is modeled as
+// some other convex combination of E1 and E2, the maximum value can only
+// decrease.
+//
+// Finally, since E2 is not symmetric about the y-axis, we must also allow for
+// the possibility that the error is a convex combination of E1 and -E2. This
+// can be handled by evaluating the error at E(-x0) as well, and then
+// computing the final error bound as
+//
+// E(x) <= max(E(x0), E(-x0)) / E1(x0) .
+//
+// Effectively, this method is simply evaluating the error at two points about
+// 1/3 and 2/3 of the way along the edges, and then scaling the maximum of
+// these two errors by a constant factor. Intuitively, the reason this works
+// is that if the two edges cross somewhere in the interior, then at least one
+// of these points will be far from the crossing.
+//
+// The actual algorithm implemented below has some additional refinements.
+// First, edges longer than 90 degrees are always subdivided; this avoids
+// various unusual situations that can happen with very long edges, and there
+// is really no reason to avoid adding vertices to edges that are so long.
+//
+// Second, the error function E1 above needs to be modified to take into
+// account spherical distortions. (It turns out that spherical distortions are
+// beneficial in the case of E2, i.e. they only make its error estimates
+// slightly more conservative.) To do this, we model E1 as the maximum error
+// in a Plate Carree edge of length 90 degrees or less. This turns out to be
+// an edge from 45:-90 to 45:90 (in lat:lng format). The corresponding error
+// as a function of "x" in the range [-1, 1] can be computed as the distance
+// between the Plate Carree edge point (45, 90 * x) and the geodesic
+// edge point (90 - 45 * abs(x), 90 * sgn(x)). Using the Haversine formula,
+// the corresponding function E1 (normalized to have a maximum value of 1) is:
+//
+// E1(x) =
+// asin(sqrt(sin(Pi / 8 * (1 - x)) ^ 2 +
+// sin(Pi / 4 * (1 - x)) ^ 2 * cos(Pi / 4) * sin(Pi / 4 * x))) /
+// asin(sqrt((1 - 1 / sqrt(2)) / 2))
+//
+// Note that this function does not need to be evaluated at runtime; it
+// simply affects the calculation of the value x0 where E1(x0) = E2(x0)
+// and the corresponding scaling factor C = 1 / E1(x0).
+//
+// ------------------------------------------------------------------
+//
+// In the case of the Mercator and Plate Carree projections this strategy
+// produces a conservative upper bound (verified using 10 million random
+// edges). Furthermore the bound is nearly tight; the scaling constant is
+// C = 1.19289, whereas the maximum observed value was 1.19254.
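+//
+// (Worked check, added for clarity: the maximum of x * (1 - x^2) on [-1, 1]
+// occurs where its derivative 1 - 3x^2 vanishes, i.e. at x = 1 / sqrt(3),
+// giving (1 / sqrt(3)) * (2 / 3) = 2 * sqrt(3) / 9 ~= 0.385, the
+// normalization constant used for E2 above.)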
+// +// Compared to the simpler midpoint evaluation method, this strategy requires +// more function evaluations (currently twice as many, but with a smarter +// tessellation algorithm it will only be 50% more). It also results in a +// small amount of additional tessellation (about 1.5%) compared to the +// midpoint method, but this is due almost entirely to the fact that the +// midpoint method does not yield conservative error estimates. +// +// For random edges with a tolerance of 1 meter, the expected amount of +// overtessellation is as follows: +// +// Midpoint Method Cubic Method +// Plate Carree 1.8% 3.0% +// Mercator 15.8% 17.4% + +const ( + // tessellationInterpolationFraction is the fraction at which the two edges + // are evaluated in order to measure the error between them. (Edges are + // evaluated at two points measured this fraction from either end.) + tessellationInterpolationFraction = 0.31215691082248312 + tessellationScaleFactor = 0.83829992569888509 + + // minTessellationTolerance is the minimum supported tolerance (which + // corresponds to a distance less than 1 micrometer on the Earth's + // surface, but is still much larger than the expected projection and + // interpolation errors). + minTessellationTolerance s1.Angle = 1e-13 +) + +// EdgeTessellator converts an edge in a given projection (e.g., Mercator) into +// a chain of spherical geodesic edges such that the maximum distance between +// the original edge and the geodesic edge chain is at most the requested +// tolerance. Similarly, it can convert a spherical geodesic edge into a chain +// of edges in a given 2D projection such that the maximum distance between the +// geodesic edge and the chain of projected edges is at most the requested tolerance. +// +// Method | Input | Output +// ------------|------------------------|----------------------- +// Projected | S2 geodesics | Planar projected edges +// Unprojected | Planar projected edges | S2 geodesics +type EdgeTessellator struct { + projection Projection + + // The given tolerance scaled by a constant fraction so that it can be + // compared against the result returned by estimateMaxError. + scaledTolerance s1.ChordAngle +} + +// NewEdgeTessellator creates a new edge tessellator for the given projection and tolerance. +func NewEdgeTessellator(p Projection, tolerance s1.Angle) *EdgeTessellator { + return &EdgeTessellator{ + projection: p, + scaledTolerance: s1.ChordAngleFromAngle(maxAngle(tolerance, minTessellationTolerance)), + } +} + +// AppendProjected converts the spherical geodesic edge AB to a chain of planar edges +// in the given projection and returns the corresponding vertices. +// +// If the given projection has one or more coordinate axes that wrap, then +// every vertex's coordinates will be as close as possible to the previous +// vertex's coordinates. Note that this may yield vertices whose +// coordinates are outside the usual range. For example, tessellating the +// edge (0:170, 0:-170) (in lat:lng notation) yields (0:170, 0:190). +func (e *EdgeTessellator) AppendProjected(a, b Point, vertices []r2.Point) []r2.Point { + pa := e.projection.Project(a) + if len(vertices) == 0 { + vertices = []r2.Point{pa} + } else { + pa = e.projection.WrapDestination(vertices[len(vertices)-1], pa) + } + + pb := e.projection.Project(b) + return e.appendProjected(pa, a, pb, b, vertices) +} + +// appendProjected splits a geodesic edge AB as necessary and returns the +// projected vertices appended to the given vertices. 
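+//
+// (Clarifying note: the edge is split at its spherical midpoint, each half
+// is tessellated recursively, and recursion stops once estimateMaxError
+// reports that a sub-edge is within scaledTolerance.)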
+//
+// The maximum recursion depth is log2(math.Pi / minTessellationTolerance) < 45
+func (e *EdgeTessellator) appendProjected(pa r2.Point, a Point, pbIn r2.Point, b Point, vertices []r2.Point) []r2.Point {
+ pb := e.projection.WrapDestination(pa, pbIn)
+ if e.estimateMaxError(pa, a, pb, b) <= e.scaledTolerance {
+ return append(vertices, pb)
+ }
+
+ mid := Point{a.Add(b.Vector).Normalize()}
+ pmid := e.projection.WrapDestination(pa, e.projection.Project(mid))
+ vertices = e.appendProjected(pa, a, pmid, mid, vertices)
+ return e.appendProjected(pmid, mid, pb, b, vertices)
+}
+
+// AppendUnprojected converts the planar edge AB in the given projection to a chain of
+// spherical geodesic edges and returns the vertices.
+//
+// Note that to construct a Loop, you must eliminate the duplicate first and last
+// vertex. Note also that if the given projection involves coordinate wrapping
+// (e.g. across the 180 degree meridian) then the first and last vertices may not
+// be exactly the same.
+func (e *EdgeTessellator) AppendUnprojected(pa, pb r2.Point, vertices []Point) []Point {
+ a := e.projection.Unproject(pa)
+ b := e.projection.Unproject(pb)
+
+ if len(vertices) == 0 {
+ vertices = []Point{a}
+ }
+
+ // Note that coordinate wrapping can create a small amount of error. For
+ // example in the edge chain "0:-175, 0:179, 0:-177", the first edge is
+ // transformed into "0:-175, 0:-181" while the second is transformed into
+ // "0:179, 0:183". The two coordinate pairs for the middle vertex
+ // ("0:-181" and "0:179") may not yield exactly the same S2Point.
+ return e.appendUnprojected(pa, a, pb, b, vertices)
+}
+
+// appendUnprojected interpolates a projected edge and appends the corresponding
+// points on the sphere.
+func (e *EdgeTessellator) appendUnprojected(pa r2.Point, a Point, pbIn r2.Point, b Point, vertices []Point) []Point {
+ pb := e.projection.WrapDestination(pa, pbIn)
+ if e.estimateMaxError(pa, a, pb, b) <= e.scaledTolerance {
+ return append(vertices, b)
+ }
+
+ pmid := e.projection.Interpolate(0.5, pa, pb)
+ mid := e.projection.Unproject(pmid)
+
+ vertices = e.appendUnprojected(pa, a, pmid, mid, vertices)
+ return e.appendUnprojected(pmid, mid, pb, b, vertices)
+}
+
+func (e *EdgeTessellator) estimateMaxError(pa r2.Point, a Point, pb r2.Point, b Point) s1.ChordAngle {
+ // See the algorithm description at the top of this file.
+ // We always tessellate edges longer than 90 degrees on the sphere, since the
+ // approximation below is not robust enough to handle such edges.
+ if a.Dot(b.Vector) < -1e-14 {
+ return s1.InfChordAngle()
+ }
+ t1 := tessellationInterpolationFraction
+ t2 := 1 - tessellationInterpolationFraction
+ mid1 := Interpolate(t1, a, b)
+ mid2 := Interpolate(t2, a, b)
+ pmid1 := e.projection.Unproject(e.projection.Interpolate(t1, pa, pb))
+ pmid2 := e.projection.Unproject(e.projection.Interpolate(t2, pa, pb))
+ return maxChordAngle(ChordAngleBetweenPoints(mid1, pmid1), ChordAngleBetweenPoints(mid2, pmid2))
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/encode.go b/vendor/github.com/blevesearch/geo/s2/encode.go
new file mode 100644
index 0000000..00d0adc
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/encode.go
@@ -0,0 +1,224 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "encoding/binary" + "io" + "math" +) + +const ( + // encodingVersion is the current version of the encoding + // format that is compatible with C++ and other S2 libraries. + encodingVersion = int8(1) + + // encodingCompressedVersion is the current version of the + // compressed format. + encodingCompressedVersion = int8(4) +) + +// encoder handles the specifics of encoding for S2 types. +type encoder struct { + w io.Writer // the real writer passed to Encode + err error +} + +func (e *encoder) writeUvarint(x uint64) { + if e.err != nil { + return + } + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, e.err = e.w.Write(buf[:n]) +} + +func (e *encoder) writeBool(x bool) { + if e.err != nil { + return + } + var val int8 + if x { + val = 1 + } + e.err = binary.Write(e.w, binary.LittleEndian, val) +} + +func (e *encoder) writeInt8(x int8) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeInt16(x int16) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeInt32(x int32) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeInt64(x int64) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeUint8(x uint8) { + if e.err != nil { + return + } + _, e.err = e.w.Write([]byte{x}) +} + +func (e *encoder) writeUint32(x uint32) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeUint64(x uint64) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeFloat32(x float32) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeFloat64(x float64) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +type byteReader interface { + io.Reader + io.ByteReader +} + +// byteReaderAdapter embellishes an io.Reader with a ReadByte method, +// so that it implements the io.ByteReader interface. +type byteReaderAdapter struct { + io.Reader +} + +func (b byteReaderAdapter) ReadByte() (byte, error) { + buf := []byte{0} + _, err := io.ReadFull(b, buf) + return buf[0], err +} + +func asByteReader(r io.Reader) byteReader { + if br, ok := r.(byteReader); ok { + return br + } + return byteReaderAdapter{r} +} + +type decoder struct { + r byteReader // the real reader passed to Decode + err error + buf []byte +} + +// Get a buffer of size 8, to avoid allocating over and over. 
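+// The buffer is reused across calls (see readFloat64), so a single decoder
+// must not be used from multiple goroutines concurrently.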
+func (d *decoder) buffer() []byte { + if d.buf == nil { + d.buf = make([]byte, 8) + } + return d.buf +} + +func (d *decoder) readBool() (x bool) { + if d.err != nil { + return + } + var val int8 + d.err = binary.Read(d.r, binary.LittleEndian, &val) + return val == 1 +} + +func (d *decoder) readInt8() (x int8) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readInt64() (x int64) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readUint8() (x uint8) { + if d.err != nil { + return + } + x, d.err = d.r.ReadByte() + return +} + +func (d *decoder) readUint32() (x uint32) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readUint64() (x uint64) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readFloat64() float64 { + if d.err != nil { + return 0 + } + buf := d.buffer() + _, d.err = io.ReadFull(d.r, buf) + return math.Float64frombits(binary.LittleEndian.Uint64(buf)) +} + +func (d *decoder) readUvarint() (x uint64) { + if d.err != nil { + return + } + x, d.err = binary.ReadUvarint(d.r) + return +} diff --git a/vendor/github.com/blevesearch/geo/s2/interleave.go b/vendor/github.com/blevesearch/geo/s2/interleave.go new file mode 100644 index 0000000..6ac6ef5 --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/interleave.go @@ -0,0 +1,143 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +/* +The lookup table below can convert a sequence of interleaved 8 bits into +non-interleaved 4 bits. The table can convert both odd and even bits at the +same time, and lut[x & 0x55] converts the even bits (bits 0, 2, 4 and 6), +while lut[x & 0xaa] converts the odd bits (bits 1, 3, 5 and 7). 
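+
+For example (an added illustration): 0x55 is 0b01010101, with all four even
+bits set and no odd bits set, so lut[0x55 & 0x55] = lut[0x55] = 0xf while
+lut[0x55 & 0xaa] = lut[0x00] = 0x0.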
+ +The lookup table below was generated using the following python code: + + def deinterleave(bits): + if bits == 0: return 0 + if bits < 4: return 1 + return deinterleave(bits / 4) * 2 + deinterleave(bits & 3) + + for i in range(256): print "0x%x," % deinterleave(i), +*/ +var deinterleaveLookup = [256]uint32{ + 0x0, 0x1, 0x1, 0x1, 0x2, 0x3, 0x3, 0x3, + 0x2, 0x3, 0x3, 0x3, 0x2, 0x3, 0x3, 0x3, + 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7, + 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7, + 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7, + 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7, + 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7, + 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7, + + 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb, + 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + + 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb, + 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + + 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb, + 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, +} + +// deinterleaveUint32 decodes the interleaved values. +func deinterleaveUint32(code uint64) (uint32, uint32) { + x := (deinterleaveLookup[code&0x55]) | + (deinterleaveLookup[(code>>8)&0x55] << 4) | + (deinterleaveLookup[(code>>16)&0x55] << 8) | + (deinterleaveLookup[(code>>24)&0x55] << 12) | + (deinterleaveLookup[(code>>32)&0x55] << 16) | + (deinterleaveLookup[(code>>40)&0x55] << 20) | + (deinterleaveLookup[(code>>48)&0x55] << 24) | + (deinterleaveLookup[(code>>56)&0x55] << 28) + y := (deinterleaveLookup[code&0xaa]) | + (deinterleaveLookup[(code>>8)&0xaa] << 4) | + (deinterleaveLookup[(code>>16)&0xaa] << 8) | + (deinterleaveLookup[(code>>24)&0xaa] << 12) | + (deinterleaveLookup[(code>>32)&0xaa] << 16) | + (deinterleaveLookup[(code>>40)&0xaa] << 20) | + (deinterleaveLookup[(code>>48)&0xaa] << 24) | + (deinterleaveLookup[(code>>56)&0xaa] << 28) + return x, y +} + +var interleaveLookup = [256]uint64{ + 0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015, + 0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055, + 0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115, + 0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155, + 0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415, + 0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455, + 0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515, + 0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555, + + 0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015, + 0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055, + 0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115, + 0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155, + 0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415, + 0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455, + 0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 
0x1514, 0x1515,
+ 0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555,
+
+ 0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015,
+ 0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055,
+ 0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115,
+ 0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155,
+ 0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415,
+ 0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455,
+ 0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515,
+ 0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555,
+
+ 0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015,
+ 0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055,
+ 0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115,
+ 0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155,
+ 0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415,
+ 0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455,
+ 0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515,
+ 0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555,
+}
+
+// interleaveUint32 interleaves the given arguments into the return value.
+//
+// The 0-bit in x will be the 0-bit in the return value.
+// The 0-bit in y will be the 1-bit in the return value.
+// The 1-bit of x will be the 2-bit in the return value, and so on.
+func interleaveUint32(x, y uint32) uint64 {
+ return (interleaveLookup[x&0xff]) |
+ (interleaveLookup[(x>>8)&0xff] << 16) |
+ (interleaveLookup[(x>>16)&0xff] << 32) |
+ (interleaveLookup[x>>24] << 48) |
+ (interleaveLookup[y&0xff] << 1) |
+ (interleaveLookup[(y>>8)&0xff] << 17) |
+ (interleaveLookup[(y>>16)&0xff] << 33) |
+ (interleaveLookup[y>>24] << 49)
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/latlng.go b/vendor/github.com/blevesearch/geo/s2/latlng.go
new file mode 100644
index 0000000..a750304
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/latlng.go
@@ -0,0 +1,101 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+const (
+ northPoleLat = s1.Angle(math.Pi/2) * s1.Radian
+ southPoleLat = -northPoleLat
+)
+
+// LatLng represents a point on the unit sphere as a pair of angles.
+type LatLng struct {
+ Lat, Lng s1.Angle
+}
+
+// LatLngFromDegrees returns a LatLng for the coordinates given in degrees.
+func LatLngFromDegrees(lat, lng float64) LatLng {
+ return LatLng{s1.Angle(lat) * s1.Degree, s1.Angle(lng) * s1.Degree}
+}
+
+// IsValid returns true iff the LatLng is normalized, with Lat ∈ [-π/2,π/2] and Lng ∈ [-π,π].
+func (ll LatLng) IsValid() bool {
+ return math.Abs(ll.Lat.Radians()) <= math.Pi/2 && math.Abs(ll.Lng.Radians()) <= math.Pi
+}
+
+// Normalized returns the normalized version of the LatLng,
+// with Lat clamped to [-π/2,π/2] and Lng wrapped in [-π,π].
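+//
+// For example (illustrative): LatLngFromDegrees(120, 200).Normalized() is
+// approximately LatLngFromDegrees(90, -160): the latitude is clamped to the
+// pole and the longitude is wrapped by one full turn.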
+func (ll LatLng) Normalized() LatLng {
+ lat := ll.Lat
+ if lat > northPoleLat {
+ lat = northPoleLat
+ } else if lat < southPoleLat {
+ lat = southPoleLat
+ }
+ lng := s1.Angle(math.Remainder(ll.Lng.Radians(), 2*math.Pi)) * s1.Radian
+ return LatLng{lat, lng}
+}
+
+func (ll LatLng) String() string { return fmt.Sprintf("[%v, %v]", ll.Lat, ll.Lng) }
+
+// Distance returns the angle between two LatLngs.
+func (ll LatLng) Distance(ll2 LatLng) s1.Angle {
+ // Haversine formula, as used in C++ S2LatLng::GetDistance.
+ lat1, lat2 := ll.Lat.Radians(), ll2.Lat.Radians()
+ lng1, lng2 := ll.Lng.Radians(), ll2.Lng.Radians()
+ dlat := math.Sin(0.5 * (lat2 - lat1))
+ dlng := math.Sin(0.5 * (lng2 - lng1))
+ x := dlat*dlat + dlng*dlng*math.Cos(lat1)*math.Cos(lat2)
+ return s1.Angle(2*math.Atan2(math.Sqrt(x), math.Sqrt(math.Max(0, 1-x)))) * s1.Radian
+}
+
+// NOTE(mikeperrow): The C++ implementation publicly exposes latitude/longitude
+// functions. Let's see if that's really necessary before exposing the same functionality.
+
+func latitude(p Point) s1.Angle {
+ return s1.Angle(math.Atan2(p.Z, math.Sqrt(p.X*p.X+p.Y*p.Y))) * s1.Radian
+}
+
+func longitude(p Point) s1.Angle {
+ return s1.Angle(math.Atan2(p.Y, p.X)) * s1.Radian
+}
+
+// PointFromLatLng returns a Point for the given LatLng.
+// The maximum error in the result is 1.5 * dblEpsilon. (This does not
+// include the error of converting degrees, E5, E6, or E7 into radians.)
+func PointFromLatLng(ll LatLng) Point {
+ phi := ll.Lat.Radians()
+ theta := ll.Lng.Radians()
+ cosphi := math.Cos(phi)
+ return Point{r3.Vector{math.Cos(theta) * cosphi, math.Sin(theta) * cosphi, math.Sin(phi)}}
+}
+
+// LatLngFromPoint returns a LatLng for a given Point.
+func LatLngFromPoint(p Point) LatLng {
+ return LatLng{latitude(p), longitude(p)}
+}
+
+// ApproxEqual reports whether the latitude and longitude of the two LatLngs
+// are the same up to a small tolerance.
+func (ll LatLng) ApproxEqual(other LatLng) bool {
+ return ll.Lat.ApproxEqual(other.Lat) && ll.Lng.ApproxEqual(other.Lng)
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/lexicon.go b/vendor/github.com/blevesearch/geo/s2/lexicon.go
new file mode 100644
index 0000000..41cbffd
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/lexicon.go
@@ -0,0 +1,175 @@
+// Copyright 2020 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "encoding/binary"
+ "hash/adler32"
+ "math"
+ "sort"
+)
+
+// TODO(roberts): If any of these are worth making public, change the
+// method signatures and type names.
+
+// emptySetID represents the last ID that will ever be generated.
+// (Non-negative IDs are reserved for singleton sets.)
+var emptySetID = int32(math.MinInt32)
+
+// idSetLexicon compactly represents a set of non-negative
+// integers such as array indices ("ID sets"). It is especially suitable when
+// either (1) there are many duplicate sets, or (2) there are many singleton
+// or empty sets. See also sequenceLexicon.
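+//
+// For example (an added illustration of the rules implemented by add below):
+// add() returns emptySetID, add(7) returns 7, and add(3, 1, 3) canonicalizes
+// the set to {1, 3} and returns a negative ID from which idSet recovers the
+// members.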
+//
+// Each distinct ID set is mapped to a 32-bit integer. Empty and singleton
+// sets take up no additional space; the set itself is represented
+// by the unique ID assigned to the set. Duplicate sets are automatically
+// eliminated. Note also that ID sets are referred to using 32-bit integers
+// rather than pointers.
+type idSetLexicon struct {
+ idSets *sequenceLexicon
+}
+
+func newIDSetLexicon() *idSetLexicon {
+ return &idSetLexicon{
+ idSets: newSequenceLexicon(),
+ }
+}
+
+// add adds the given set of integers to the lexicon if it is not already
+// present, and returns the unique ID for this set. The values are automatically
+// sorted and duplicates are removed.
+//
+// The primary differences between this and sequenceLexicon are:
+// 1. Empty and singleton sets are represented implicitly; they use no space.
+// 2. Sets are represented rather than sequences; the ordering of values is
+// not important and duplicates are removed.
+// 3. The values must be 32-bit non-negative integers only.
+func (l *idSetLexicon) add(ids ...int32) int32 {
+ // Empty sets have a special ID chosen not to conflict with other IDs.
+ if len(ids) == 0 {
+ return emptySetID
+ }
+
+ // Singleton sets are represented by their element.
+ if len(ids) == 1 {
+ return ids[0]
+ }
+
+ // Canonicalize the set by sorting and removing duplicates.
+ //
+ // Creates a new slice in order to not alter the supplied values.
+ set := uniqueInt32s(ids)
+
+ // Non-singleton sets are represented by the bitwise complement of the ID
+ // returned by the sequenceLexicon.
+ return ^l.idSets.add(set)
+}
+
+// idSet returns the set of integers corresponding to an ID returned by add.
+func (l *idSetLexicon) idSet(setID int32) []int32 {
+ if setID >= 0 {
+ return []int32{setID}
+ }
+ if setID == emptySetID {
+ return []int32{}
+ }
+
+ return l.idSets.sequence(^setID)
+}
+
+func (l *idSetLexicon) clear() {
+ l.idSets.clear()
+}
+
+// sequenceLexicon compactly represents a sequence of values (e.g., tuples).
+// It automatically eliminates duplicate slices, and maps the remaining
+// sequences to sequentially increasing integer IDs. See also idSetLexicon.
+//
+// Each distinct sequence is mapped to a 32-bit integer.
+type sequenceLexicon struct {
+ values []int32
+ begins []uint32
+
+ // idSet is a mapping of a sequence hash to sequence index in the lexicon.
+ idSet map[uint32]int32
+}
+
+func newSequenceLexicon() *sequenceLexicon {
+ return &sequenceLexicon{
+ begins: []uint32{0},
+ idSet: make(map[uint32]int32),
+ }
+}
+
+// clear clears all data from the lexicon.
+func (l *sequenceLexicon) clear() {
+ l.values = nil
+ l.begins = []uint32{0}
+ l.idSet = make(map[uint32]int32)
+}
+
+// add adds the given sequence of values to the lexicon if it is not already
+// present, and returns its ID. IDs are assigned sequentially starting from zero.
+func (l *sequenceLexicon) add(ids []int32) int32 {
+ if id, ok := l.idSet[hashSet(ids)]; ok {
+ return id
+ }
+ for _, v := range ids {
+ l.values = append(l.values, v)
+ }
+ l.begins = append(l.begins, uint32(len(l.values)))
+
+ id := int32(len(l.begins)) - 2
+ l.idSet[hashSet(ids)] = id
+
+ return id
+}
+
+// sequence returns the original sequence of values for the given ID.
+func (l *sequenceLexicon) sequence(id int32) []int32 {
+ return l.values[l.begins[id]:l.begins[id+1]]
+}
+
+// size reports the number of value sequences in the lexicon.
+func (l *sequenceLexicon) size() int {
+ // Subtract one because the list of begins starts out with the first element set to 0.
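+ // For example (hypothetical state): after add([]int32{1, 2}) and
+ // add([]int32{3}), begins is [0 2 3] and size reports 2.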
+ return len(l.begins) - 1
+}
+
+// hashSet returns a hash of this sequence of int32s.
+func hashSet(s []int32) uint32 {
+ // TODO(roberts): We just need a way to nicely hash all the values down to
+ // a 32-bit value. To ensure no unnecessary dependencies we use the core
+ // library types available to do this. Is there a better option?
+ a := adler32.New()
+ binary.Write(a, binary.LittleEndian, s)
+ return a.Sum32()
+}
+
+// uniqueInt32s returns the sorted and deduplicated set of int32s from the input.
+func uniqueInt32s(in []int32) []int32 {
+ var vals []int32
+ m := make(map[int32]bool)
+ for _, i := range in {
+ if m[i] {
+ continue
+ }
+ m[i] = true
+ vals = append(vals, i)
+ }
+ sort.Slice(vals, func(i, j int) bool { return vals[i] < vals[j] })
+ return vals
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/loop.go b/vendor/github.com/blevesearch/geo/s2/loop.go
new file mode 100644
index 0000000..d0035df
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/loop.go
@@ -0,0 +1,1833 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// Loop represents a simple spherical polygon. It consists of a sequence
+// of vertices where the first vertex is implicitly connected to the
+// last. All loops are defined to have a CCW orientation, i.e. the interior of
+// the loop is on the left side of the edges. This implies that a clockwise
+// loop enclosing a small area is interpreted to be a CCW loop enclosing a
+// very large area.
+//
+// Loops are not allowed to have any duplicate vertices (whether adjacent or
+// not). Non-adjacent edges are not allowed to intersect, and furthermore edges
+// of length 180 degrees are not allowed (i.e., adjacent vertices cannot be
+// antipodal). Loops must have at least 3 vertices (except for the "empty" and
+// "full" loops discussed below).
+//
+// There are two special loops: the "empty" loop contains no points and the
+// "full" loop contains all points. These loops do not have any edges, but to
+// preserve the invariant that every loop can be represented as a vertex
+// chain, they are defined as having exactly one vertex each (see EmptyLoop
+// and FullLoop).
+type Loop struct {
+ vertices []Point
+
+ // originInside keeps a precomputed value of whether this loop contains the
+ // origin, versus computing it from the set of vertices every time.
+ originInside bool
+
+ // depth is the nesting depth of this Loop if it is contained by a Polygon
+ // or other shape and is used to determine if this loop represents a hole
+ // or a filled in portion.
+ depth int
+
+ // bound is a conservative bound on all points contained by this loop.
+ // If l.ContainsPoint(P), then l.bound.ContainsPoint(P).
+ bound Rect
+
+ // Since bound is not exact, it is possible that a loop A contains
+ // another loop B whose bounds are slightly larger.
subregionBound + // has been expanded sufficiently to account for this error, i.e. + // if A.Contains(B), then A.subregionBound.Contains(B.bound). + subregionBound Rect + + // index is the spatial index for this Loop. + index *ShapeIndex +} + +// LoopFromPoints constructs a loop from the given points. +func LoopFromPoints(pts []Point) *Loop { + l := &Loop{ + vertices: pts, + index: NewShapeIndex(), + } + + l.initOriginAndBound() + return l +} + +// LoopFromCell constructs a loop corresponding to the given cell. +// +// Note that the loop and cell *do not* contain exactly the same set of +// points, because Loop and Cell have slightly different definitions of +// point containment. For example, a Cell vertex is contained by all +// four neighboring Cells, but it is contained by exactly one of four +// Loops constructed from those cells. As another example, the cell +// coverings of cell and LoopFromCell(cell) will be different, because the +// loop contains points on its boundary that actually belong to other cells +// (i.e., the covering will include a layer of neighboring cells). +func LoopFromCell(c Cell) *Loop { + l := &Loop{ + vertices: []Point{ + c.Vertex(0), + c.Vertex(1), + c.Vertex(2), + c.Vertex(3), + }, + index: NewShapeIndex(), + } + + l.initOriginAndBound() + return l +} + +// These two points are used for the special Empty and Full loops. +var ( + emptyLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: 1}} + fullLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: -1}} +) + +// EmptyLoop returns a special "empty" loop. +func EmptyLoop() *Loop { + return LoopFromPoints([]Point{emptyLoopPoint}) +} + +// FullLoop returns a special "full" loop. +func FullLoop() *Loop { + return LoopFromPoints([]Point{fullLoopPoint}) +} + +// initOriginAndBound sets the origin containment for the given point and then calls +// the initialization for the bounds objects and the internal index. +func (l *Loop) initOriginAndBound() { + if len(l.vertices) < 3 { + // Check for the special "empty" and "full" loops (which have one vertex). + if !l.isEmptyOrFull() { + l.originInside = false + return + } + + // This is the special empty or full loop, so the origin depends on if + // the vertex is in the southern hemisphere or not. + l.originInside = l.vertices[0].Z < 0 + } else { + // Point containment testing is done by counting edge crossings starting + // at a fixed point on the sphere (OriginPoint). We need to know whether + // the reference point (OriginPoint) is inside or outside the loop before + // we can construct the ShapeIndex. We do this by first guessing that + // it is outside, and then seeing whether we get the correct containment + // result for vertex 1. If the result is incorrect, the origin must be + // inside the loop. + // + // A loop with consecutive vertices A,B,C contains vertex B if and only if + // the fixed vector R = B.Ortho is contained by the wedge ABC. The + // wedge is closed at A and open at C, i.e. the point B is inside the loop + // if A = R but not if C = R. This convention is required for compatibility + // with VertexCrossing. (Note that we can't use OriginPoint + // as the fixed vector because of the possibility that B == OriginPoint.) 
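+ // In short: assume the origin is outside, predict the containment of
+ // vertex 1 under that assumption, and flip the assumption if the direct
+ // test below disagrees.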
+ l.originInside = false + v1Inside := OrderedCCW(Point{l.vertices[1].Ortho()}, l.vertices[0], l.vertices[2], l.vertices[1]) + if v1Inside != l.ContainsPoint(l.vertices[1]) { + l.originInside = true + } + } + + // We *must* call initBound before initializing the index, because + // initBound calls ContainsPoint which does a bounds check before using + // the index. + l.initBound() + + // Create a new index and add us to it. + l.index = NewShapeIndex() + l.index.Add(l) +} + +// initBound sets up the approximate bounding Rects for this loop. +func (l *Loop) initBound() { + if len(l.vertices) == 0 { + *l = *EmptyLoop() + return + } + // Check for the special "empty" and "full" loops. + if l.isEmptyOrFull() { + if l.IsEmpty() { + l.bound = EmptyRect() + } else { + l.bound = FullRect() + } + l.subregionBound = l.bound + return + } + + // The bounding rectangle of a loop is not necessarily the same as the + // bounding rectangle of its vertices. First, the maximal latitude may be + // attained along the interior of an edge. Second, the loop may wrap + // entirely around the sphere (e.g. a loop that defines two revolutions of a + // candy-cane stripe). Third, the loop may include one or both poles. + // Note that a small clockwise loop near the equator contains both poles. + bounder := NewRectBounder() + for i := 0; i <= len(l.vertices); i++ { // add vertex 0 twice + bounder.AddPoint(l.Vertex(i)) + } + b := bounder.RectBound() + + if l.ContainsPoint(Point{r3.Vector{0, 0, 1}}) { + b = Rect{r1.Interval{b.Lat.Lo, math.Pi / 2}, s1.FullInterval()} + } + // If a loop contains the south pole, then either it wraps entirely + // around the sphere (full longitude range), or it also contains the + // north pole in which case b.Lng.IsFull() due to the test above. + // Either way, we only need to do the south pole containment test if + // b.Lng.IsFull(). + if b.Lng.IsFull() && l.ContainsPoint(Point{r3.Vector{0, 0, -1}}) { + b.Lat.Lo = -math.Pi / 2 + } + l.bound = b + l.subregionBound = ExpandForSubregions(l.bound) +} + +// Validate checks whether this is a valid loop. +func (l *Loop) Validate() error { + if err := l.findValidationErrorNoIndex(); err != nil { + return err + } + + // Check for intersections between non-adjacent edges (including at vertices) + // TODO(roberts): Once shapeutil gets findAnyCrossing uncomment this. + // return findAnyCrossing(l.index) + + return nil +} + +// findValidationErrorNoIndex reports whether this is not a valid loop, but +// skips checks that would require a ShapeIndex to be built for the loop. This +// is primarily used by Polygon to do validation so it doesn't trigger the +// creation of unneeded ShapeIndices. +func (l *Loop) findValidationErrorNoIndex() error { + // All vertices must be unit length. + for i, v := range l.vertices { + if !v.IsUnit() { + return fmt.Errorf("vertex %d is not unit length", i) + } + } + + // Loops must have at least 3 vertices (except for empty and full). + if len(l.vertices) < 3 { + if l.isEmptyOrFull() { + return nil // Skip remaining tests. + } + return fmt.Errorf("non-empty, non-full loops must have at least 3 vertices") + } + + // Loops are not allowed to have any duplicate vertices or edge crossings. + // We split this check into two parts. First we check that no edge is + // degenerate (identical endpoints). Then we check that there are no + // intersections between non-adjacent edges (including at vertices). The + // second check needs the ShapeIndex, so it does not fall within the scope + // of this method. 
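+ // For example (hypothetical inputs): the vertex list {A, A, B} fails the
+ // degenerate-edge check below, and {A, -A, B} fails the antipodal check.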
+ for i, v := range l.vertices {
+ if v == l.Vertex(i+1) {
+ return fmt.Errorf("edge %d is degenerate (duplicate vertex)", i)
+ }
+
+ // Antipodal vertices are not allowed.
+ if other := (Point{l.Vertex(i + 1).Mul(-1)}); v == other {
+ return fmt.Errorf("vertices %d and %d are antipodal", i,
+ (i+1)%len(l.vertices))
+ }
+ }
+
+ return nil
+}
+
+// Contains reports whether the region contained by this loop is a superset of the
+// region contained by the given other loop.
+func (l *Loop) Contains(o *Loop) bool {
+ // For a loop A to contain the loop B, all of the following must
+ // be true:
+ //
+ // (1) There are no edge crossings between A and B except at vertices.
+ //
+ // (2) At every vertex that is shared between A and B, the local edge
+ // ordering implies that A contains B.
+ //
+ // (3) If there are no shared vertices, then A must contain a vertex of B
+ // and B must not contain a vertex of A. (An arbitrary vertex may be
+ // chosen in each case.)
+ //
+ // The second part of (3) is necessary to detect the case of two loops whose
+ // union is the entire sphere, i.e. two loops that contain each other's
+ // boundaries but not each other's interiors.
+ if !l.subregionBound.Contains(o.bound) {
+ return false
+ }
+
+ // Special cases to handle either loop being empty or full.
+ if l.isEmptyOrFull() || o.isEmptyOrFull() {
+ return l.IsFull() || o.IsEmpty()
+ }
+
+ // Check whether there are any edge crossings, and also check the loop
+ // relationship at any shared vertices.
+ relation := &containsRelation{}
+ if hasCrossingRelation(l, o, relation) {
+ return false
+ }
+
+ // There are no crossings, and if there are any shared vertices then A
+ // contains B locally at each shared vertex.
+ if relation.foundSharedVertex {
+ return true
+ }
+
+ // Since there are no edge intersections or shared vertices, we just need to
+ // test condition (3) above. We can skip this test if we discovered that A
+ // contains at least one point of B while checking for edge crossings.
+ if !l.ContainsPoint(o.Vertex(0)) {
+ return false
+ }
+
+ // We still need to check whether (A union B) is the entire sphere.
+ // Normally this check is very cheap due to the bounding box precondition.
+ if (o.subregionBound.Contains(l.bound) || o.bound.Union(l.bound).IsFull()) &&
+ o.ContainsPoint(l.Vertex(0)) {
+ return false
+ }
+ return true
+}
+
+// Intersects reports whether the region contained by this loop intersects the region
+// contained by the other loop.
+func (l *Loop) Intersects(o *Loop) bool {
+ // Given two loops, A and B, A.Intersects(B) if and only if !A.Complement().Contains(B).
+ //
+ // This code is similar to Contains, but is optimized for the case
+ // where both loops enclose less than half of the sphere.
+ if !l.bound.Intersects(o.bound) {
+ return false
+ }
+
+ // Check whether there are any edge crossings, and also check the loop
+ // relationship at any shared vertices.
+ relation := &intersectsRelation{}
+ if hasCrossingRelation(l, o, relation) {
+ return true
+ }
+ if relation.foundSharedVertex {
+ return false
+ }
+
+ // Since there are no edge intersections or shared vertices, the loops
+ // intersect only if A contains B, B contains A, or the two loops contain
+ // each other's boundaries. These checks are usually cheap because of the
+ // bounding box preconditions. Note that neither loop is empty (because of
+ // the bounding box check above), so it is safe to access vertex(0).
+
+ // Check whether A contains B, or A and B contain each other's boundaries.
+ // (Note that A contains all the vertices of B in either case.)
+ if l.subregionBound.Contains(o.bound) || l.bound.Union(o.bound).IsFull() {
+ if l.ContainsPoint(o.Vertex(0)) {
+ return true
+ }
+ }
+ // Check whether B contains A.
+ if o.subregionBound.Contains(l.bound) {
+ if o.ContainsPoint(l.Vertex(0)) {
+ return true
+ }
+ }
+ return false
+}
+
+// Equal reports whether two loops have the same vertices in the same linear order
+// (i.e., cyclic rotations are not allowed).
+func (l *Loop) Equal(other *Loop) bool {
+ if len(l.vertices) != len(other.vertices) {
+ return false
+ }
+
+ for i, v := range l.vertices {
+ if v != other.Vertex(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// BoundaryEqual reports whether the two loops have the same boundary. This is
+// true if and only if the loops have the same vertices in the same cyclic order
+// (i.e., the vertices may be cyclically rotated). The empty and full loops are
+// considered to have different boundaries.
+func (l *Loop) BoundaryEqual(o *Loop) bool {
+ if len(l.vertices) != len(o.vertices) {
+ return false
+ }
+
+ // Special case to handle empty or full loops. Since they have the same
+ // number of vertices, if one loop is empty/full then so is the other.
+ if l.isEmptyOrFull() {
+ return l.IsEmpty() == o.IsEmpty()
+ }
+
+ // Loop through the vertices to find the first of ours that matches the
+ // starting vertex of the other loop. Use that offset to then 'align' the
+ // vertices for comparison.
+ for offset, vertex := range l.vertices {
+ if vertex == o.Vertex(0) {
+ // There is at most one starting offset since loop vertices are unique.
+ for i := 0; i < len(l.vertices); i++ {
+ if l.Vertex(i+offset) != o.Vertex(i) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+ return false
+}
+
+// compareBoundary returns +1 if this loop contains the boundary of the other loop,
+// -1 if it excludes the boundary of the other, and 0 if the boundaries of the two
+// loops cross. Shared edges are handled as follows:
+//
+// If XY is a shared edge, define Reversed(XY) to be true if XY
+// appears in opposite directions in both loops.
+// Then this loop contains XY if and only if Reversed(XY) == (the other loop is a hole).
+// (Intuitively, this checks whether this loop contains a vanishingly small region
+// extending from the boundary of the other toward the interior of the polygon to
+// which the other belongs.)
+//
+// This function is used for testing containment and intersection of
+// multi-loop polygons. Note that this method is not symmetric, since the
+// result depends on the direction of this loop but not on the direction of
+// the other loop (in the absence of shared edges).
+//
+// This requires that neither loop is empty, and that if the other loop IsFull,
+// it must not be a hole.
+func (l *Loop) compareBoundary(o *Loop) int {
+ // The bounds must intersect for containment or crossing.
+ if !l.bound.Intersects(o.bound) {
+ return -1
+ }
+
+ // Full loops are handled as though the loop surrounded the entire sphere.
+ if l.IsFull() {
+ return 1
+ }
+ if o.IsFull() {
+ return -1
+ }
+
+ // Check whether there are any edge crossings, and also check the loop
+ // relationship at any shared vertices.
+ relation := newCompareBoundaryRelation(o.IsHole())
+ if hasCrossingRelation(l, o, relation) {
+ return 0
+ }
+ if relation.foundSharedVertex {
+ if relation.containsEdge {
+ return 1
+ }
+ return -1
+ }
+
+ // There are no edge intersections or shared vertices, so we can check
+ // whether A contains an arbitrary vertex of B.
+ if l.ContainsPoint(o.Vertex(0)) { + return 1 + } + return -1 +} + +// ContainsOrigin reports true if this loop contains s2.OriginPoint(). +func (l *Loop) ContainsOrigin() bool { + return l.originInside +} + +// ReferencePoint returns the reference point for this loop. +func (l *Loop) ReferencePoint() ReferencePoint { + return OriginReferencePoint(l.originInside) +} + +// NumEdges returns the number of edges in this shape. +func (l *Loop) NumEdges() int { + if l.isEmptyOrFull() { + return 0 + } + return len(l.vertices) +} + +// Edge returns the endpoints for the given edge index. +func (l *Loop) Edge(i int) Edge { + return Edge{l.Vertex(i), l.Vertex(i + 1)} +} + +// NumChains reports the number of contiguous edge chains in the Loop. +func (l *Loop) NumChains() int { + if l.IsEmpty() { + return 0 + } + return 1 +} + +// Chain returns the i-th edge chain in the Shape. +func (l *Loop) Chain(chainID int) Chain { + return Chain{0, l.NumEdges()} +} + +// ChainEdge returns the j-th edge of the i-th edge chain. +func (l *Loop) ChainEdge(chainID, offset int) Edge { + return Edge{l.Vertex(offset), l.Vertex(offset + 1)} +} + +// ChainPosition returns a ChainPosition pair (i, j) such that edgeID is the +// j-th edge of the Loop. +func (l *Loop) ChainPosition(edgeID int) ChainPosition { + return ChainPosition{0, edgeID} +} + +// Dimension returns the dimension of the geometry represented by this Loop. +func (l *Loop) Dimension() int { return 2 } + +func (l *Loop) typeTag() typeTag { return typeTagNone } + +func (l *Loop) privateInterface() {} + +// IsEmpty reports true if this is the special empty loop that contains no points. +func (l *Loop) IsEmpty() bool { + return l.isEmptyOrFull() && !l.ContainsOrigin() +} + +// IsFull reports true if this is the special full loop that contains all points. +func (l *Loop) IsFull() bool { + return l.isEmptyOrFull() && l.ContainsOrigin() +} + +// isEmptyOrFull reports true if this loop is either the "empty" or "full" special loops. +func (l *Loop) isEmptyOrFull() bool { + return len(l.vertices) == 1 +} + +// Vertices returns the vertices in the loop. +func (l *Loop) Vertices() []Point { + return l.vertices +} + +// RectBound returns a tight bounding rectangle. If the loop contains the point, +// the bound also contains it. +func (l *Loop) RectBound() Rect { + return l.bound +} + +// CapBound returns a bounding cap that may have more padding than the corresponding +// RectBound. The bound is conservative such that if the loop contains a point P, +// the bound also contains it. +func (l *Loop) CapBound() Cap { + return l.bound.CapBound() +} + +// Vertex returns the vertex for the given index. For convenience, the vertex indices +// wrap automatically for methods that do index math such as Edge. +// i.e., Vertex(NumEdges() + n) is the same as Vertex(n). +func (l *Loop) Vertex(i int) Point { + return l.vertices[i%len(l.vertices)] +} + +// OrientedVertex returns the vertex in reverse order if the loop represents a polygon +// hole. For example, arguments 0, 1, 2 are mapped to vertices n-1, n-2, n-3, where +// n == len(vertices). This ensures that the interior of the polygon is always to +// the left of the vertex chain. +// +// This requires: 0 <= i < 2 * len(vertices) +func (l *Loop) OrientedVertex(i int) Point { + j := i - len(l.vertices) + if j < 0 { + j = i + } + if l.IsHole() { + j = len(l.vertices) - 1 - j + } + return l.Vertex(j) +} + +// NumVertices returns the number of vertices in this loop. 
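+// Note that the special empty and full loops report 1 here, even though
+// NumEdges reports 0 for them.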
+func (l *Loop) NumVertices() int { + return len(l.vertices) +} + +// bruteForceContainsPoint reports if the given point is contained by this loop. +// This method does not use the ShapeIndex, so it is only preferable below a certain +// size of loop. +func (l *Loop) bruteForceContainsPoint(p Point) bool { + origin := OriginPoint() + inside := l.originInside + crosser := NewChainEdgeCrosser(origin, p, l.Vertex(0)) + for i := 1; i <= len(l.vertices); i++ { // add vertex 0 twice + inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(i)) + } + return inside +} + +// ContainsPoint returns true if the loop contains the point. +func (l *Loop) ContainsPoint(p Point) bool { + if !l.index.IsFresh() && !l.bound.ContainsPoint(p) { + return false + } + + // For small loops it is faster to just check all the crossings. We also + // use this method during loop initialization because InitOriginAndBound() + // calls Contains() before InitIndex(). Otherwise, we keep track of the + // number of calls to Contains() and only build the index when enough calls + // have been made so that we think it is worth the effort. Note that the + // code below is structured so that if many calls are made in parallel only + // one thread builds the index, while the rest continue using brute force + // until the index is actually available. + + const maxBruteForceVertices = 32 + // TODO(roberts): add unindexed contains calls tracking + + if len(l.index.shapes) == 0 || // Index has not been initialized yet. + len(l.vertices) <= maxBruteForceVertices { + return l.bruteForceContainsPoint(p) + } + + // Otherwise, look up the point in the index. + it := l.index.Iterator() + if !it.LocatePoint(p) { + return false + } + return l.iteratorContainsPoint(it, p) +} + +// ContainsCell reports whether the given Cell is contained by this Loop. +func (l *Loop) ContainsCell(target Cell) bool { + it := l.index.Iterator() + relation := it.LocateCellID(target.ID()) + + // If "target" is disjoint from all index cells, it is not contained. + // Similarly, if "target" is subdivided into one or more index cells then it + // is not contained, since index cells are subdivided only if they (nearly) + // intersect a sufficient number of edges. (But note that if "target" itself + // is an index cell then it may be contained, since it could be a cell with + // no edges in the loop interior.) + if relation != Indexed { + return false + } + + // Otherwise check if any edges intersect "target". + if l.boundaryApproxIntersects(it, target) { + return false + } + + // Otherwise check if the loop contains the center of "target". + return l.iteratorContainsPoint(it, target.Center()) +} + +// IntersectsCell reports whether this Loop intersects the given cell. +func (l *Loop) IntersectsCell(target Cell) bool { + it := l.index.Iterator() + relation := it.LocateCellID(target.ID()) + + // If target does not overlap any index cell, there is no intersection. + if relation == Disjoint { + return false + } + // If target is subdivided into one or more index cells, there is an + // intersection to within the ShapeIndex error bound (see Contains). + if relation == Subdivided { + return true + } + // If target is an index cell, there is an intersection because index cells + // are created only if they have at least one edge or they are entirely + // contained by the loop. + if it.CellID() == target.id { + return true + } + // Otherwise check if any edges intersect target. 
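+ // (A conservative true from boundaryApproxIntersects is acceptable here:
+ // it can only make IntersectsCell report an intersection that is within
+ // the index's error tolerance.)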
+ if l.boundaryApproxIntersects(it, target) { + return true + } + // Otherwise check if the loop contains the center of target. + return l.iteratorContainsPoint(it, target.Center()) +} + +// CellUnionBound computes a covering of the Loop. +func (l *Loop) CellUnionBound() []CellID { + return l.CapBound().CellUnionBound() +} + +// boundaryApproxIntersects reports if the loop's boundary intersects target. +// It may also return true when the loop boundary does not intersect target but +// some edge comes within the worst-case error tolerance. +// +// This requires that it.Locate(target) returned Indexed. +func (l *Loop) boundaryApproxIntersects(it *ShapeIndexIterator, target Cell) bool { + aClipped := it.IndexCell().findByShapeID(0) + + // If there are no edges, there is no intersection. + if len(aClipped.edges) == 0 { + return false + } + + // We can save some work if target is the index cell itself. + if it.CellID() == target.ID() { + return true + } + + // Otherwise check whether any of the edges intersect target. + maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist) + bound := target.BoundUV().ExpandedByMargin(maxError) + for _, ai := range aClipped.edges { + v0, v1, ok := ClipToPaddedFace(l.Vertex(ai), l.Vertex(ai+1), target.Face(), maxError) + if ok && edgeIntersectsRect(v0, v1, bound) { + return true + } + } + return false +} + +// iteratorContainsPoint reports if the iterator that is positioned at the ShapeIndexCell +// that may contain p, contains the point p. +func (l *Loop) iteratorContainsPoint(it *ShapeIndexIterator, p Point) bool { + // Test containment by drawing a line segment from the cell center to the + // given point and counting edge crossings. + aClipped := it.IndexCell().findByShapeID(0) + inside := aClipped.containsCenter + if len(aClipped.edges) > 0 { + center := it.Center() + crosser := NewEdgeCrosser(center, p) + aiPrev := -2 + for _, ai := range aClipped.edges { + if ai != aiPrev+1 { + crosser.RestartAt(l.Vertex(ai)) + } + aiPrev = ai + inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(ai+1)) + } + } + return inside +} + +// RegularLoop creates a loop with the given number of vertices, all +// located on a circle of the specified radius around the given center. +func RegularLoop(center Point, radius s1.Angle, numVertices int) *Loop { + return RegularLoopForFrame(getFrame(center), radius, numVertices) +} + +// RegularLoopForFrame creates a loop centered around the z-axis of the given +// coordinate frame, with the first vertex in the direction of the positive x-axis. +func RegularLoopForFrame(frame matrix3x3, radius s1.Angle, numVertices int) *Loop { + return LoopFromPoints(regularPointsForFrame(frame, radius, numVertices)) +} + +// CanonicalFirstVertex returns a first index and a direction (either +1 or -1) +// such that the vertex sequence (first, first+dir, ..., first+(n-1)*dir) does +// not change when the loop vertex order is rotated or inverted. This allows the +// loop vertices to be traversed in a canonical order. The return values are +// chosen such that (first, ..., first+n*dir) are in the range [0, 2*n-1] as +// expected by the Vertex method. +func (l *Loop) CanonicalFirstVertex() (firstIdx, direction int) { + firstIdx = 0 + n := len(l.vertices) + for i := 1; i < n; i++ { + if l.Vertex(i).Cmp(l.Vertex(firstIdx).Vector) == -1 { + firstIdx = i + } + } + + // 0 <= firstIdx <= n-1, so (firstIdx+n*dir) <= 2*n-1. 
+ if l.Vertex(firstIdx+1).Cmp(l.Vertex(firstIdx+n-1).Vector) == -1 {
+ return firstIdx, 1
+ }
+
+ // n <= firstIdx <= 2*n-1, so (firstIdx+n*dir) >= 0.
+ firstIdx += n
+ return firstIdx, -1
+}
+
+// TurningAngle returns the sum of the turning angles at each vertex. The return
+// value is positive if the loop is counter-clockwise, negative if the loop is
+// clockwise, and zero if the loop is a great circle. Degenerate and
+// nearly-degenerate loops are handled consistently with Sign. So for example,
+// if a loop has zero area (i.e., it is a very small CCW loop) then the turning
+// angle will always be positive.
+//
+// This quantity is also called the "geodesic curvature" of the loop.
+func (l *Loop) TurningAngle() float64 {
+ // For empty and full loops, we return the limit value as the loop area
+ // approaches 0 or 4*Pi respectively.
+ if l.isEmptyOrFull() {
+ if l.ContainsOrigin() {
+ return -2 * math.Pi
+ }
+ return 2 * math.Pi
+ }
+
+ // Don't crash even if the loop is not well-defined.
+ if len(l.vertices) < 3 {
+ return 0
+ }
+
+ // To ensure that we get the same result when the vertex order is rotated,
+ // and that the result is negated when the vertex order is reversed, we need
+ // to add up the individual turn angles in a consistent order. (In general,
+ // adding up a set of numbers in a different order can change the sum due to
+ // rounding errors.)
+ //
+ // Furthermore, if we just accumulate an ordinary sum then the worst-case
+ // error is quadratic in the number of vertices. (This can happen with
+ // spiral shapes, where the partial sum of the turning angles can be linear
+ // in the number of vertices.) To avoid this we use the Kahan summation
+ // algorithm (http://en.wikipedia.org/wiki/Kahan_summation_algorithm).
+ n := len(l.vertices)
+ i, dir := l.CanonicalFirstVertex()
+ sum := TurnAngle(l.Vertex((i+n-dir)%n), l.Vertex(i), l.Vertex((i+dir)%n))
+
+ compensation := s1.Angle(0)
+ for n-1 > 0 {
+ i += dir
+ angle := TurnAngle(l.Vertex(i-dir), l.Vertex(i), l.Vertex(i+dir))
+ oldSum := sum
+ angle += compensation
+ sum += angle
+ compensation = (oldSum - sum) + angle
+ n--
+ }
+
+ const maxCurvature = 2*math.Pi - 4*dblEpsilon
+
+ return math.Max(-maxCurvature, math.Min(maxCurvature, float64(dir)*float64(sum+compensation)))
+}
+
+// turningAngleMaxError returns the maximum error in TurningAngle. The value is not
+// constant; it depends on the loop.
+func (l *Loop) turningAngleMaxError() float64 {
+ // The maximum error can be bounded as follows:
+ // 3.00 * dblEpsilon for RobustCrossProd(b, a)
+ // 3.00 * dblEpsilon for RobustCrossProd(c, b)
+ // 3.25 * dblEpsilon for Angle()
+ // 2.00 * dblEpsilon for each addition in the Kahan summation
+ // ------------------
+ // 11.25 * dblEpsilon
+ maxErrorPerVertex := 11.25 * dblEpsilon
+ return maxErrorPerVertex * float64(len(l.vertices))
+}
+
+// IsHole reports whether this loop represents a hole in its containing polygon.
+func (l *Loop) IsHole() bool { return l.depth&1 != 0 }
+
+// Sign returns -1 if this Loop represents a hole in its containing polygon, and +1 otherwise.
+func (l *Loop) Sign() int {
+ if l.IsHole() {
+ return -1
+ }
+ return 1
+}
+
+// IsNormalized reports whether the loop area is at most 2*pi. Degenerate loops are
+// handled consistently with Sign, i.e., if a loop can be
+// expressed as the union of degenerate or nearly-degenerate CCW triangles,
+// then it will always be considered normalized.
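+//
+// For example (hypothetical values): a small CCW triangle is normalized,
+// while the complement produced by calling Invert on it (area close to
+// 4*pi) is not.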
+func (l *Loop) IsNormalized() bool {
+ // Optimization: if the longitude span is less than 180 degrees, then the
+ // loop covers less than half the sphere and is therefore normalized.
+ if l.bound.Lng.Length() < math.Pi {
+ return true
+ }
+
+ // We allow some error so that hemispheres are always considered normalized.
+ // TODO(roberts): This is no longer required by the Polygon implementation,
+ // so alternatively we could create the invariant that a loop is normalized
+ // if and only if its complement is not normalized.
+ return l.TurningAngle() >= -l.turningAngleMaxError()
+}
+
+// Normalize inverts the loop if necessary so that the area enclosed by the loop
+// is at most 2*pi.
+func (l *Loop) Normalize() {
+ if !l.IsNormalized() {
+ l.Invert()
+ }
+}
+
+// Invert reverses the order of the loop vertices, effectively complementing the
+// region represented by the loop. For example, the loop ABCD (with edges
+// AB, BC, CD, DA) becomes the loop DCBA (with edges DC, CB, BA, AD).
+// Notice that the last edge is the same in both cases except that its
+// direction has been reversed.
+func (l *Loop) Invert() {
+ l.index.Reset()
+ if l.isEmptyOrFull() {
+ if l.IsFull() {
+ l.vertices[0] = emptyLoopPoint
+ } else {
+ l.vertices[0] = fullLoopPoint
+ }
+ } else {
+ // For non-special loops, reverse the slice of vertices.
+ for i := len(l.vertices)/2 - 1; i >= 0; i-- {
+ opp := len(l.vertices) - 1 - i
+ l.vertices[i], l.vertices[opp] = l.vertices[opp], l.vertices[i]
+ }
+ }
+
+ // originInside must be set correctly before building the ShapeIndex.
+ l.originInside = !l.originInside
+ if l.bound.Lat.Lo > -math.Pi/2 && l.bound.Lat.Hi < math.Pi/2 {
+ // The complement of this loop contains both poles.
+ l.bound = FullRect()
+ l.subregionBound = l.bound
+ } else {
+ l.initBound()
+ }
+ l.index.Add(l)
+}
+
+// findVertex returns the index of the vertex at the given Point in the range
+// 1..numVertices, and a boolean indicating if a vertex was found.
+func (l *Loop) findVertex(p Point) (index int, ok bool) {
+ const notFound = 0
+ if len(l.vertices) < 10 {
+ // Exhaustive search for loops below a small threshold.
+ for i := 1; i <= len(l.vertices); i++ {
+ if l.Vertex(i) == p {
+ return i, true
+ }
+ }
+ return notFound, false
+ }
+
+ it := l.index.Iterator()
+ if !it.LocatePoint(p) {
+ return notFound, false
+ }
+
+ aClipped := it.IndexCell().findByShapeID(0)
+ for i := aClipped.numEdges() - 1; i >= 0; i-- {
+ ai := aClipped.edges[i]
+ if l.Vertex(ai) == p {
+ if ai == 0 {
+ return len(l.vertices), true
+ }
+ return ai, true
+ }
+
+ if l.Vertex(ai+1) == p {
+ return ai + 1, true
+ }
+ }
+ return notFound, false
+}
+
+// ContainsNested reports whether the given loop is contained within this loop.
+// This function does not test for edge intersections. The two loops must meet
+// all of the Polygon requirements; for example this implies that their
+// boundaries may not cross or have any shared edges (although they may have
+// shared vertices).
+func (l *Loop) ContainsNested(other *Loop) bool {
+ if !l.subregionBound.Contains(other.bound) {
+ return false
+ }
+
+ // Special cases to handle either loop being empty or full. Also bail out
+ // when B has no vertices to avoid heap overflow on the vertex(1) call
+ // below. (This method is called during polygon initialization before the
+ // client has an opportunity to call IsValid().)
+ if l.isEmptyOrFull() || other.NumVertices() < 2 {
+ return l.IsFull() || other.IsEmpty()
+ }
+
+ // We are given that A and B do not share any edges, and that either one
+ // loop contains the other or they do not intersect.
+ m, ok := l.findVertex(other.Vertex(1))
+ if !ok {
+ // Since other.vertex(1) is not shared, we can check whether A contains it.
+ return l.ContainsPoint(other.Vertex(1))
+ }
+
+ // Check whether the edge order around other.Vertex(1) is compatible with
+ // A containing B.
+ return WedgeContains(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1), other.Vertex(0), other.Vertex(2))
+}
+
+// surfaceIntegralFloat64 computes the oriented surface integral of some quantity f(x)
+// over the loop interior, given a function f(A,B,C) that returns the
+// corresponding integral over the spherical triangle ABC. Here "oriented
+// surface integral" means:
+//
+// (1) f(A,B,C) must be the integral of f if ABC is counterclockwise,
+// and the integral of -f if ABC is clockwise.
+//
+// (2) The result of this function is *either* the integral of f over the
+// loop interior, or the integral of (-f) over the loop exterior.
+//
+// Note that there are at least two common situations where it is easy to work
+// around property (2) above:
+//
+// - If the integral of f over the entire sphere is zero, then it doesn't
+// matter which case is returned because they are always equal.
+//
+// - If f is non-negative, then it is easy to detect when the integral over
+// the loop exterior has been returned, and the integral over the loop
+// interior can be obtained by adding the integral of f over the entire
+// unit sphere (a constant) to the result.
+//
+// Any changes to this method may need corresponding changes to surfaceIntegralPoint as well.
+func (l *Loop) surfaceIntegralFloat64(f func(a, b, c Point) float64) float64 {
+ // We sum f over a collection T of oriented triangles, possibly
+ // overlapping. Let the sign of a triangle be +1 if it is CCW and -1
+ // otherwise, and let the sign of a point x be the sum of the signs of the
+ // triangles containing x. Then the collection of triangles T is chosen
+ // such that either:
+ //
+ // (1) Each point in the loop interior has sign +1, and sign 0 otherwise; or
+ // (2) Each point in the loop exterior has sign -1, and sign 0 otherwise.
+ //
+ // The triangles basically consist of a fan from vertex 0 to every loop
+ // edge that does not include vertex 0. These triangles will always satisfy
+ // either (1) or (2). However, what makes this a bit tricky is that
+ // spherical edges become numerically unstable as their length approaches
+ // 180 degrees. Of course there is not much we can do if the loop itself
+ // contains such edges, but we would like to make sure that all the triangle
+ // edges under our control (i.e., the non-loop edges) are stable. For
+ // example, consider a loop around the equator consisting of four equally
+ // spaced points. This is a well-defined loop, but we cannot just split it
+ // into two triangles by connecting vertex 0 to vertex 2.
+ //
+ // We handle this type of situation by moving the origin of the triangle fan
+ // whenever we are about to create an unstable edge. We choose a new
+ // location for the origin such that all relevant edges are stable. We also
+ // create extra triangles with the appropriate orientation so that the sum
+ // of the triangle signs is still correct at every point.
+
+ // The maximum length of an edge for it to be considered numerically stable.
+ // The exact value is fairly arbitrary since it depends on the stability of + // the function f. The value below is quite conservative but could be + // reduced further if desired. + const maxLength = math.Pi - 1e-5 + + var sum float64 + origin := l.Vertex(0) + for i := 1; i+1 < len(l.vertices); i++ { + // Let V_i be vertex(i), let O be the current origin, and let length(A,B) + // be the length of edge (A,B). At the start of each loop iteration, the + // "leading edge" of the triangle fan is (O,V_i), and we want to extend + // the triangle fan so that the leading edge is (O,V_i+1). + // + // Invariants: + // 1. length(O,V_i) < maxLength for all (i > 1). + // 2. Either O == V_0, or O is approximately perpendicular to V_0. + // 3. "sum" is the oriented integral of f over the area defined by + // (O, V_0, V_1, ..., V_i). + if l.Vertex(i+1).Angle(origin.Vector) > maxLength { + // We are about to create an unstable edge, so choose a new origin O' + // for the triangle fan. + oldOrigin := origin + if origin == l.Vertex(0) { + // The following point is well-separated from V_i and V_0 (and + // therefore V_i+1 as well). + origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()} + } else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength { + // All edges of the triangle (O, V_0, V_i) are stable, so we can + // revert to using V_0 as the origin. + origin = l.Vertex(0) + } else { + // (O, V_i+1) and (V_0, V_i) are antipodal pairs, and O and V_0 are + // perpendicular. Therefore V_0.CrossProd(O) is approximately + // perpendicular to all of {O, V_0, V_i, V_i+1}, and we can choose + // this point O' as the new origin. + origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)} + + // Advance the edge (V_0,O) to (V_0,O'). + sum += f(l.Vertex(0), oldOrigin, origin) + } + // Advance the edge (O,V_i) to (O',V_i). + sum += f(oldOrigin, l.Vertex(i), origin) + } + // Advance the edge (O,V_i) to (O,V_i+1). + sum += f(origin, l.Vertex(i), l.Vertex(i+1)) + } + // If the origin is not V_0, we need to sum one more triangle. + if origin != l.Vertex(0) { + // Advance the edge (O,V_n-1) to (O,V_0). + sum += f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0)) + } + return sum +} + +// surfaceIntegralPoint mirrors the surfaceIntegralFloat64 method but over Points; +// see that method for commentary. The C++ version uses a templated method. +// Any changes to this method may need corresponding changes to surfaceIntegralFloat64 as well. +func (l *Loop) surfaceIntegralPoint(f func(a, b, c Point) Point) Point { + const maxLength = math.Pi - 1e-5 + var sum r3.Vector + + origin := l.Vertex(0) + for i := 1; i+1 < len(l.vertices); i++ { + if l.Vertex(i+1).Angle(origin.Vector) > maxLength { + oldOrigin := origin + if origin == l.Vertex(0) { + origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()} + } else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength { + origin = l.Vertex(0) + } else { + origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)} + sum = sum.Add(f(l.Vertex(0), oldOrigin, origin).Vector) + } + sum = sum.Add(f(oldOrigin, l.Vertex(i), origin).Vector) + } + sum = sum.Add(f(origin, l.Vertex(i), l.Vertex(i+1)).Vector) + } + if origin != l.Vertex(0) { + sum = sum.Add(f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0)).Vector) + } + return Point{sum} +} + +// Area returns the area of the loop interior, i.e. the region on the left side of +// the loop. The return value is between 0 and 4*pi. (Note that the return +// value is not affected by whether this loop is a "hole" or a "shell".) 
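+//
+// For illustration (hypothetical usage): FullLoop().Area() is 4*pi,
+// EmptyLoop().Area() is 0, and
+//
+// RegularLoop(PointFromLatLng(LatLngFromDegrees(0, 0)), 1*s1.Degree, 64)
+//
+// yields a loop whose Area is close to that of a spherical cap with a
+// 1 degree radius.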
+func (l *Loop) Area() float64 { + // It is surprisingly difficult to compute the area of a loop robustly. The + // main issues are (1) whether degenerate loops are considered to be CCW or + // not (i.e., whether their area is close to 0 or 4*pi), and (2) computing + // the areas of small loops with good relative accuracy. + // + // With respect to degeneracies, we would like Area to be consistent + // with ContainsPoint in that loops that contain many points + // should have large areas, and loops that contain few points should have + // small areas. For example, if a degenerate triangle is considered CCW + // according to s2predicates Sign, then it will contain very few points and + // its area should be approximately zero. On the other hand if it is + // considered clockwise, then it will contain virtually all points and so + // its area should be approximately 4*pi. + // + // More precisely, let U be the set of Points for which IsUnitLength + // is true, let P(U) be the projection of those points onto the mathematical + // unit sphere, and let V(P(U)) be the Voronoi diagram of the projected + // points. Then for every loop x, we would like Area to approximately + // equal the sum of the areas of the Voronoi regions of the points p for + // which x.ContainsPoint(p) is true. + // + // The second issue is that we want to compute the area of small loops + // accurately. This requires having good relative precision rather than + // good absolute precision. For example, if the area of a loop is 1e-12 and + // the error is 1e-15, then the area only has 3 digits of accuracy. (For + // reference, 1e-12 is about 40 square meters on the surface of the earth.) + // We would like to have good relative accuracy even for small loops. + // + // To achieve these goals, we combine two different methods of computing the + // area. This first method is based on the Gauss-Bonnet theorem, which says + // that the area enclosed by the loop equals 2*pi minus the total geodesic + // curvature of the loop (i.e., the sum of the "turning angles" at all the + // loop vertices). The big advantage of this method is that as long as we + // use Sign to compute the turning angle at each vertex, then + // degeneracies are always handled correctly. In other words, if a + // degenerate loop is CCW according to the symbolic perturbations used by + // Sign, then its turning angle will be approximately 2*pi. + // + // The disadvantage of the Gauss-Bonnet method is that its absolute error is + // about 2e-15 times the number of vertices (see turningAngleMaxError). + // So, it cannot compute the area of small loops accurately. + // + // The second method is based on splitting the loop into triangles and + // summing the area of each triangle. To avoid the difficulty and expense + // of decomposing the loop into a union of non-overlapping triangles, + // instead we compute a signed sum over triangles that may overlap (see the + // comments for surfaceIntegral). The advantage of this method + // is that the area of each triangle can be computed with much better + // relative accuracy (using l'Huilier's theorem). The disadvantage is that + // the result is a signed area: CCW loops may yield a small positive value, + // while CW loops may yield a small negative value (which is converted to a + // positive area by adding 4*pi). This means that small errors in computing + // the signed area may translate into a very large error in the result (if + // the sign of the sum is incorrect). 
+ // + // So, our strategy is to combine these two methods as follows. First we + // compute the area using the "signed sum over triangles" approach (since it + // is generally more accurate). We also estimate the maximum error in this + // result. If the signed area is too close to zero (i.e., zero is within + // the error bounds), then we double-check the sign of the result using the + // Gauss-Bonnet method. (In fact we just call IsNormalized, which is + // based on this method.) If the two methods disagree, we return either 0 + // or 4*pi based on the result of IsNormalized. Otherwise we return the + // area that we computed originally. + if l.isEmptyOrFull() { + if l.ContainsOrigin() { + return 4 * math.Pi + } + return 0 + } + area := l.surfaceIntegralFloat64(SignedArea) + + // TODO(roberts): This error estimate is very approximate. There are two + // issues: (1) SignedArea needs some improvements to ensure that its error + // is actually never higher than GirardArea, and (2) although the number of + // triangles in the sum is typically N-2, in theory it could be as high as + // 2*N for pathological inputs. But in other respects this error bound is + // very conservative since it assumes that the maximum error is achieved on + // every triangle. + maxError := l.turningAngleMaxError() + + // The signed area should be between approximately -4*pi and 4*pi. + if area < 0 { + // We have computed the negative of the area of the loop exterior. + area += 4 * math.Pi + } + + if area > 4*math.Pi { + area = 4 * math.Pi + } + if area < 0 { + area = 0 + } + + // If the area is close enough to zero or 4*pi so that the loop orientation + // is ambiguous, then we compute the loop orientation explicitly. + if area < maxError && !l.IsNormalized() { + return 4 * math.Pi + } else if area > (4*math.Pi-maxError) && l.IsNormalized() { + return 0 + } + + return area +} + +// Centroid returns the true centroid of the loop multiplied by the area of the +// loop. The result is not unit length, so you may want to normalize it. Also +// note that in general, the centroid may not be contained by the loop. +// +// We prescale by the loop area for two reasons: (1) it is cheaper to +// compute this way, and (2) it makes it easier to compute the centroid of +// more complicated shapes (by splitting them into disjoint regions and +// adding their centroids). +// +// Note that the return value is not affected by whether this loop is a +// "hole" or a "shell". +func (l *Loop) Centroid() Point { + // surfaceIntegralPoint() returns either the integral of position over loop + // interior, or the negative of the integral of position over the loop + // exterior. But these two values are the same (!), because the integral of + // position over the entire sphere is (0, 0, 0). + return l.surfaceIntegralPoint(TrueCentroid) +} + +// Encode encodes the Loop. +func (l Loop) Encode(w io.Writer) error { + e := &encoder{w: w} + l.encode(e) + return e.err +} + +func (l Loop) encode(e *encoder) { + e.writeInt8(encodingVersion) + e.writeUint32(uint32(len(l.vertices))) + for _, v := range l.vertices { + e.writeFloat64(v.X) + e.writeFloat64(v.Y) + e.writeFloat64(v.Z) + } + + e.writeBool(l.originInside) + e.writeInt32(int32(l.depth)) + + // Encode the bound. + l.bound.encode(e) +} + +// Decode decodes a loop. 
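+// A decoded loop is equivalent to the encoded original; for example
+// (hypothetical usage, assuming a bytes.Buffer):
+//
+// var buf bytes.Buffer
+// if err := l.Encode(&buf); err != nil { /* handle error */ }
+// var l2 Loop
+// if err := l2.Decode(&buf); err != nil { /* handle error */ }
+// // l.Equal(&l2) == true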
+func (l *Loop) Decode(r io.Reader) error { + *l = Loop{} + d := &decoder{r: asByteReader(r)} + l.decode(d) + return d.err +} + +func (l *Loop) decode(d *decoder) { + version := int8(d.readUint8()) + if d.err != nil { + return + } + if version != encodingVersion { + d.err = fmt.Errorf("cannot decode version %d", version) + return + } + + // Empty loops are explicitly allowed here: a newly created loop has zero vertices + // and such loops encode and decode properly. + nvertices := d.readUint32() + if nvertices > maxEncodedVertices { + if d.err == nil { + d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices) + + } + return + } + l.vertices = make([]Point, nvertices) + for i := range l.vertices { + l.vertices[i].X = d.readFloat64() + l.vertices[i].Y = d.readFloat64() + l.vertices[i].Z = d.readFloat64() + } + l.index = NewShapeIndex() + l.originInside = d.readBool() + l.depth = int(d.readUint32()) + l.bound.decode(d) + l.subregionBound = ExpandForSubregions(l.bound) + + l.index.Add(l) +} + +// Bitmasks to read from properties. +const ( + originInside = 1 << iota + boundEncoded +) + +func (l *Loop) xyzFaceSiTiVertices() []xyzFaceSiTi { + ret := make([]xyzFaceSiTi, len(l.vertices)) + for i, v := range l.vertices { + ret[i].xyz = v + ret[i].face, ret[i].si, ret[i].ti, ret[i].level = xyzToFaceSiTi(v) + } + return ret +} + +func (l *Loop) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) { + if len(l.vertices) != len(vertices) { + panic("encodeCompressed: vertices must be the same length as l.vertices") + } + if len(vertices) > maxEncodedVertices { + if e.err == nil { + e.err = fmt.Errorf("too many vertices (%d; max is %d)", len(vertices), maxEncodedVertices) + } + return + } + e.writeUvarint(uint64(len(vertices))) + encodePointsCompressed(e, vertices, snapLevel) + + props := l.compressedEncodingProperties() + e.writeUvarint(props) + e.writeUvarint(uint64(l.depth)) + if props&boundEncoded != 0 { + l.bound.encode(e) + } +} + +func (l *Loop) compressedEncodingProperties() uint64 { + var properties uint64 + if l.originInside { + properties |= originInside + } + + // Write whether there is a bound so we can change the threshold later. + // Recomputing the bound multiplies the decode time taken per vertex + // by a factor of about 3.5. Without recomputing the bound, decode + // takes approximately 125 ns / vertex. A loop with 63 vertices + // encoded without the bound will take ~30us to decode, which is + // acceptable. At ~3.5 bytes / vertex without the bound, adding + // the bound will increase the size by <15%, which is also acceptable. + const minVerticesForBound = 64 + if len(l.vertices) >= minVerticesForBound { + properties |= boundEncoded + } + + return properties +} + +func (l *Loop) decodeCompressed(d *decoder, snapLevel int) { + nvertices := d.readUvarint() + if d.err != nil { + return + } + if nvertices > maxEncodedVertices { + d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices) + return + } + l.vertices = make([]Point, nvertices) + decodePointsCompressed(d, snapLevel, l.vertices) + properties := d.readUvarint() + + // Make sure values are valid before using. 
+ if d.err != nil {
+ return
+ }
+
+ l.index = NewShapeIndex()
+ l.originInside = (properties & originInside) != 0
+
+ l.depth = int(d.readUvarint())
+
+ if (properties & boundEncoded) != 0 {
+ l.bound.decode(d)
+ if d.err != nil {
+ return
+ }
+ l.subregionBound = ExpandForSubregions(l.bound)
+ } else {
+ l.initBound()
+ }
+
+ l.index.Add(l)
+}
+
+// crossingTarget is an enum representing the possible crossing target cases for relations.
+type crossingTarget int
+
+const (
+ crossingTargetDontCare crossingTarget = iota
+ crossingTargetDontCross
+ crossingTargetCross
+)
+
+// loopRelation defines the interface for checking a type of relationship between two loops.
+// Some examples of relations are Contains, Intersects, or CompareBoundary.
+type loopRelation interface {
+ // Optionally, aCrossingTarget and bCrossingTarget can specify an early-exit
+ // condition for the loop relation. If any point P is found such that
+ //
+ // A.ContainsPoint(P) == aCrossingTarget() &&
+ // B.ContainsPoint(P) == bCrossingTarget()
+ //
+ // then the loop relation is assumed to be the same as if a pair of crossing
+ // edges were found. For example, the ContainsPoint relation has
+ //
+ // aCrossingTarget() == crossingTargetDontCross
+ // bCrossingTarget() == crossingTargetCross
+ //
+ // because if A.ContainsPoint(P) == false and B.ContainsPoint(P) == true
+ // for any point P, then it is equivalent to finding an edge crossing (i.e.,
+ // since Contains returns false in both cases).
+ //
+ // Loop relations that do not have an early-exit condition of this form
+ // should return crossingTargetDontCare for both crossing targets.
+
+ // aCrossingTarget reports whether loop A crosses the target point with
+ // the given relation type.
+ aCrossingTarget() crossingTarget
+ // bCrossingTarget reports whether loop B crosses the target point with
+ // the given relation type.
+ bCrossingTarget() crossingTarget
+
+ // wedgesCross reports if a shared vertex ab1 and the two associated wedges
+ // (a0, ab1, a2) and (b0, ab1, b2) are equivalent to an edge crossing.
+ // The loop relation is also allowed to maintain its own internal state, and
+ // can return true if it observes any sequence of wedges that are equivalent
+ // to an edge crossing.
+ wedgesCross(a0, ab1, a2, b0, b2 Point) bool
+}
+
+// loopCrosser is a helper type for determining whether two loops cross.
+// It is instantiated twice for each pair of loops to be tested, once for the
+// pair (A,B) and once for the pair (B,A), in order to be able to process
+// edges in either loop nesting order.
+type loopCrosser struct {
+ a, b *Loop
+ relation loopRelation
+ swapped bool
+ aCrossingTarget crossingTarget
+ bCrossingTarget crossingTarget
+
+ // state maintained by startEdge and edgeCrossesCell.
+ crosser *EdgeCrosser
+ aj, bjPrev int
+
+ // temporary data declared here to avoid repeated memory allocations.
+ bQuery *CrossingEdgeQuery
+ bCells []*ShapeIndexCell
+}
+
+// newLoopCrosser creates a loopCrosser from the given values. If swapped is true,
+// the loops A and B have been swapped. This affects how arguments are passed to
+// the given loop relation, since for example A.Contains(B) is not the same as
+// B.Contains(A).
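+//
+// Note that when swapped is true, edgeCrossesCell passes the B-side wedge
+// to wedgesCross first, so the relation still sees its arguments in the
+// original (A,B) order.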
+func newLoopCrosser(a, b *Loop, relation loopRelation, swapped bool) *loopCrosser {
+	l := &loopCrosser{
+		a:               a,
+		b:               b,
+		relation:        relation,
+		swapped:         swapped,
+		aCrossingTarget: relation.aCrossingTarget(),
+		bCrossingTarget: relation.bCrossingTarget(),
+		bQuery:          NewCrossingEdgeQuery(b.index),
+	}
+	if swapped {
+		l.aCrossingTarget, l.bCrossingTarget = l.bCrossingTarget, l.aCrossingTarget
+	}
+
+	return l
+}
+
+// startEdge sets the crosser's state for checking the given edge of loop A.
+func (l *loopCrosser) startEdge(aj int) {
+	l.crosser = NewEdgeCrosser(l.a.Vertex(aj), l.a.Vertex(aj+1))
+	l.aj = aj
+	l.bjPrev = -2
+}
+
+// edgeCrossesCell reports whether the current edge of loop A has any crossings with
+// edges of the index cell of loop B.
+func (l *loopCrosser) edgeCrossesCell(bClipped *clippedShape) bool {
+	// Test the current edge of A against all edges of bClipped.
+	bNumEdges := bClipped.numEdges()
+	for j := 0; j < bNumEdges; j++ {
+		bj := bClipped.edges[j]
+		if bj != l.bjPrev+1 {
+			l.crosser.RestartAt(l.b.Vertex(bj))
+		}
+		l.bjPrev = bj
+		if crossing := l.crosser.ChainCrossingSign(l.b.Vertex(bj + 1)); crossing == DoNotCross {
+			continue
+		} else if crossing == Cross {
+			return true
+		}
+
+		// We only need to check each shared vertex once, so we only
+		// consider the case where l.a.Vertex(l.aj+1) == l.b.Vertex(bj+1).
+		if l.a.Vertex(l.aj+1) == l.b.Vertex(bj+1) {
+			if l.swapped {
+				if l.relation.wedgesCross(l.b.Vertex(bj), l.b.Vertex(bj+1), l.b.Vertex(bj+2), l.a.Vertex(l.aj), l.a.Vertex(l.aj+2)) {
+					return true
+				}
+			} else {
+				if l.relation.wedgesCross(l.a.Vertex(l.aj), l.a.Vertex(l.aj+1), l.a.Vertex(l.aj+2), l.b.Vertex(bj), l.b.Vertex(bj+2)) {
+					return true
+				}
+			}
+		}
+	}
+
+	return false
+}
+
+// cellCrossesCell reports whether there are any edge crossings or wedge crossings
+// within the two given cells.
+func (l *loopCrosser) cellCrossesCell(aClipped, bClipped *clippedShape) bool {
+	// Test all edges of aClipped against all edges of bClipped.
+	for _, edge := range aClipped.edges {
+		l.startEdge(edge)
+		if l.edgeCrossesCell(bClipped) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// cellCrossesAnySubcell reports whether, given an index cell of A, there are any
+// edge or wedge crossings with any index cell of B contained within bID.
+func (l *loopCrosser) cellCrossesAnySubcell(aClipped *clippedShape, bID CellID) bool {
+	// Test all edges of aClipped against all edges of B. The relevant B
+	// edges are guaranteed to be children of bID, which lets us find the
+	// correct index cells more efficiently.
+	bRoot := PaddedCellFromCellID(bID, 0)
+	for _, aj := range aClipped.edges {
+		// Use a CrossingEdgeQuery starting at bRoot to find the index cells
+		// of B that might contain crossing edges.
+		l.bCells = l.bQuery.getCells(l.a.Vertex(aj), l.a.Vertex(aj+1), bRoot)
+		if len(l.bCells) == 0 {
+			continue
+		}
+		l.startEdge(aj)
+		for c := 0; c < len(l.bCells); c++ {
+			if l.edgeCrossesCell(l.bCells[c].shapes[0]) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// hasCrossing reports whether, given two iterators positioned such that
+// ai.cellID().ContainsCellID(bi.cellID()), there is an edge or wedge crossing
+// anywhere within ai.cellID(). This function advances bi only past ai.cellID().
+func (l *loopCrosser) hasCrossing(ai, bi *rangeIterator) bool {
+	// If ai.CellID() intersects many edges of B, then it is faster to use
+	// CrossingEdgeQuery to narrow down the candidates. But if it intersects
+	// only a few edges, it is faster to check all the crossings directly.
+	// We handle this by advancing bi and keeping track of how many edges we
+	// would need to test.
+	const edgeQueryMinEdges = 20 // Tuned from benchmarks.
+	var totalEdges int
+	l.bCells = nil
+
+	for {
+		if n := bi.it.IndexCell().shapes[0].numEdges(); n > 0 {
+			totalEdges += n
+			if totalEdges >= edgeQueryMinEdges {
+				// There are too many edges to test them directly, so use CrossingEdgeQuery.
+				if l.cellCrossesAnySubcell(ai.it.IndexCell().shapes[0], ai.cellID()) {
+					return true
+				}
+				bi.seekBeyond(ai)
+				return false
+			}
+			l.bCells = append(l.bCells, bi.indexCell())
+		}
+		bi.next()
+		if bi.cellID() > ai.rangeMax {
+			break
+		}
+	}
+
+	// Test all the edge crossings directly.
+	for _, c := range l.bCells {
+		if l.cellCrossesCell(ai.it.IndexCell().shapes[0], c.shapes[0]) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// containsCenterMatches reports whether the clippedShape's containsCenter boolean
+// corresponds to the given crossing target type. (This is to work around C++
+// allowing false == 0, true == 1 style implicit conversions and comparisons.)
+func containsCenterMatches(a *clippedShape, target crossingTarget) bool {
+	return (!a.containsCenter && target == crossingTargetDontCross) ||
+		(a.containsCenter && target == crossingTargetCross)
+}
+
+// hasCrossingRelation reports whether, given two iterators positioned such that
+// ai.cellID().ContainsCellID(bi.cellID()), there is a crossing relationship
+// anywhere within ai.cellID(). Specifically, this method returns true if there
+// is an edge crossing, a wedge crossing, or a point P that matches both relations'
+// crossing targets. This function advances both iterators past ai.cellID().
+func (l *loopCrosser) hasCrossingRelation(ai, bi *rangeIterator) bool {
+	aClipped := ai.it.IndexCell().shapes[0]
+	if aClipped.numEdges() != 0 {
+		// The current cell of A has at least one edge, so check for crossings.
+		if l.hasCrossing(ai, bi) {
+			return true
+		}
+		ai.next()
+		return false
+	}
+
+	if !containsCenterMatches(aClipped, l.aCrossingTarget) {
+		// The crossing target for A is not satisfied, so we skip over these cells of B.
+		bi.seekBeyond(ai)
+		ai.next()
+		return false
+	}
+
+	// All points within ai.cellID() satisfy the crossing target for A, so it's
+	// worth iterating through the cells of B to see whether any cell
+	// centers also satisfy the crossing target for B.
+	for bi.cellID() <= ai.rangeMax {
+		bClipped := bi.it.IndexCell().shapes[0]
+		if containsCenterMatches(bClipped, l.bCrossingTarget) {
+			return true
+		}
+		bi.next()
+	}
+	ai.next()
+	return false
+}
+
+// hasCrossingRelation checks all edges of loop A for intersection against all edges
+// of loop B and reports whether there are any that satisfy the given relation. If
+// there is any shared vertex, the wedges centered at this vertex are sent to the
+// given relation to be tested.
+//
+// If the two loop boundaries cross, this method is guaranteed to return
+// true. It also returns true in certain cases if the loop relationship is
+// equivalent to crossing. For example, if the relation is Contains and a
+// point P is found such that B contains P but A does not contain P, this
+// method will return true to indicate that the result is the same as though
+// a pair of crossing edges were found (since Contains returns false in
+// both cases).
+//
+// See Contains, Intersects and CompareBoundary for the three uses of this function.
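+//
+// As an illustrative sketch (the actual callers live with the Loop methods,
+// not here), the relations defined below are used in calls of this shape:
+//
+//	// Does A contain B? Any crossing, or any point of B outside A,
+//	// disproves containment.
+//	notContained := hasCrossingRelation(a, b, &containsRelation{})
+//
+//	// Do A and B intersect? Any crossing, or any point interior to both
+//	// loops, proves intersection.
+//	intersects := hasCrossingRelation(a, b, &intersectsRelation{})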
+func hasCrossingRelation(a, b *Loop, relation loopRelation) bool { + // We look for CellID ranges where the indexes of A and B overlap, and + // then test those edges for crossings. + ai := newRangeIterator(a.index) + bi := newRangeIterator(b.index) + + ab := newLoopCrosser(a, b, relation, false) // Tests edges of A against B + ba := newLoopCrosser(b, a, relation, true) // Tests edges of B against A + + for !ai.done() || !bi.done() { + if ai.rangeMax < bi.rangeMin { + // The A and B cells don't overlap, and A precedes B. + ai.seekTo(bi) + } else if bi.rangeMax < ai.rangeMin { + // The A and B cells don't overlap, and B precedes A. + bi.seekTo(ai) + } else { + // One cell contains the other. Determine which cell is larger. + abRelation := int64(ai.it.CellID().lsb() - bi.it.CellID().lsb()) + if abRelation > 0 { + // A's index cell is larger. + if ab.hasCrossingRelation(ai, bi) { + return true + } + } else if abRelation < 0 { + // B's index cell is larger. + if ba.hasCrossingRelation(bi, ai) { + return true + } + } else { + // The A and B cells are the same. Since the two cells + // have the same center point P, check whether P satisfies + // the crossing targets. + aClipped := ai.it.IndexCell().shapes[0] + bClipped := bi.it.IndexCell().shapes[0] + if containsCenterMatches(aClipped, ab.aCrossingTarget) && + containsCenterMatches(bClipped, ab.bCrossingTarget) { + return true + } + // Otherwise test all the edge crossings directly. + if aClipped.numEdges() > 0 && bClipped.numEdges() > 0 && ab.cellCrossesCell(aClipped, bClipped) { + return true + } + ai.next() + bi.next() + } + } + } + return false +} + +// containsRelation implements loopRelation for a contains operation. If +// A.ContainsPoint(P) == false && B.ContainsPoint(P) == true, it is equivalent +// to having an edge crossing (i.e., Contains returns false). +type containsRelation struct { + foundSharedVertex bool +} + +func (c *containsRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCross } +func (c *containsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross } +func (c *containsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool { + c.foundSharedVertex = true + return !WedgeContains(a0, ab1, a2, b0, b2) +} + +// intersectsRelation implements loopRelation for an intersects operation. Given +// two loops, A and B, if A.ContainsPoint(P) == true && B.ContainsPoint(P) == true, +// it is equivalent to having an edge crossing (i.e., Intersects returns true). +type intersectsRelation struct { + foundSharedVertex bool +} + +func (i *intersectsRelation) aCrossingTarget() crossingTarget { return crossingTargetCross } +func (i *intersectsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross } +func (i *intersectsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool { + i.foundSharedVertex = true + return WedgeIntersects(a0, ab1, a2, b0, b2) +} + +// compareBoundaryRelation implements loopRelation for comparing boundaries. +// +// The compare boundary relation does not have a useful early-exit condition, +// so we return crossingTargetDontCare for both crossing targets. +// +// Aside: A possible early exit condition could be based on the following. +// If A contains a point of both B and ~B, then A intersects Boundary(B). +// If ~A contains a point of both B and ~B, then ~A intersects Boundary(B). +// So if the intersections of {A, ~A} with {B, ~B} are all non-empty, +// the return value is 0, i.e., Boundary(A) intersects Boundary(B). 
+// Unfortunately it isn't worth detecting this situation because by the
+// time we have seen a point in all four intersection regions, we are also
+// guaranteed to have seen at least one pair of crossing edges.
+type compareBoundaryRelation struct {
+	reverse           bool // True if the other loop should be reversed.
+	foundSharedVertex bool // True if any wedge was processed.
+	containsEdge      bool // True if any edge of the other loop is contained by this loop.
+	excludesEdge      bool // True if any edge of the other loop is excluded by this loop.
+}
+
+func newCompareBoundaryRelation(reverse bool) *compareBoundaryRelation {
+	return &compareBoundaryRelation{reverse: reverse}
+}
+
+func (c *compareBoundaryRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCare }
+func (c *compareBoundaryRelation) bCrossingTarget() crossingTarget { return crossingTargetDontCare }
+func (c *compareBoundaryRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
+	// Because we don't care about the interior of the other loop, only its
+	// boundary, it is sufficient to check whether this loop contains the
+	// semiwedge (ab1, b2).
+	c.foundSharedVertex = true
+	if wedgeContainsSemiwedge(a0, ab1, a2, b2, c.reverse) {
+		c.containsEdge = true
+	} else {
+		c.excludesEdge = true
+	}
+	return c.containsEdge && c.excludesEdge
+}
+
+// wedgeContainsSemiwedge reports whether the wedge (a0, ab1, a2) contains the
+// "semiwedge" defined as any non-empty open set of rays immediately CCW from
+// the edge (ab1, b2). If reverse is true, then substitute clockwise for CCW;
+// this simulates what would happen if the direction of the other loop was reversed.
+func wedgeContainsSemiwedge(a0, ab1, a2, b2 Point, reverse bool) bool {
+	if b2 == a0 || b2 == a2 {
+		// We have a shared or reversed edge.
+		return (b2 == a0) == reverse
+	}
+	return OrderedCCW(a0, a2, b2, ab1)
+}
+
+// containsNonCrossingBoundary reports whether, given two loops whose boundaries
+// do not cross (see compareBoundary), this loop contains the boundary of the
+// other loop. If reverseOther is true, the boundary of the other loop is reversed
+// first (which only affects the result when there are shared edges). This method
+// is cheaper than compareBoundary because it does not test for edge intersections.
+//
+// This function requires that neither loop is empty, and that if the other loop is
+// full, then reverseOther == false.
+func (l *Loop) containsNonCrossingBoundary(other *Loop, reverseOther bool) bool {
+	// The bounds must intersect for containment.
+	if !l.bound.Intersects(other.bound) {
+		return false
+	}
+
+	// Full loops are handled as though the loop surrounded the entire sphere.
+	if l.IsFull() {
+		return true
+	}
+	if other.IsFull() {
+		return false
+	}
+
+	m, ok := l.findVertex(other.Vertex(0))
+	if !ok {
+		// Since the other loop's vertex 0 is not shared, we can check whether
+		// this loop contains it.
+		return l.ContainsPoint(other.Vertex(0))
+	}
+	// Otherwise check whether the edge (b0, b1) is contained by this loop.
+ return wedgeContainsSemiwedge(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1), + other.Vertex(1), reverseOther) +} + +// TODO(roberts): Differences from the C++ version: +// DistanceToPoint +// DistanceToBoundary +// Project +// ProjectToBoundary +// BoundaryApproxEqual +// BoundaryNear diff --git a/vendor/github.com/blevesearch/geo/s2/matrix3x3.go b/vendor/github.com/blevesearch/geo/s2/matrix3x3.go new file mode 100644 index 0000000..01696fe --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/matrix3x3.go @@ -0,0 +1,127 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + + "github.com/golang/geo/r3" +) + +// matrix3x3 represents a traditional 3x3 matrix of floating point values. +// This is not a full fledged matrix. It only contains the pieces needed +// to satisfy the computations done within the s2 package. +type matrix3x3 [3][3]float64 + +// col returns the given column as a Point. +func (m *matrix3x3) col(col int) Point { + return Point{r3.Vector{m[0][col], m[1][col], m[2][col]}} +} + +// row returns the given row as a Point. +func (m *matrix3x3) row(row int) Point { + return Point{r3.Vector{m[row][0], m[row][1], m[row][2]}} +} + +// setCol sets the specified column to the value in the given Point. +func (m *matrix3x3) setCol(col int, p Point) *matrix3x3 { + m[0][col] = p.X + m[1][col] = p.Y + m[2][col] = p.Z + + return m +} + +// setRow sets the specified row to the value in the given Point. +func (m *matrix3x3) setRow(row int, p Point) *matrix3x3 { + m[row][0] = p.X + m[row][1] = p.Y + m[row][2] = p.Z + + return m +} + +// scale multiplies the matrix by the given value. +func (m *matrix3x3) scale(f float64) *matrix3x3 { + return &matrix3x3{ + [3]float64{f * m[0][0], f * m[0][1], f * m[0][2]}, + [3]float64{f * m[1][0], f * m[1][1], f * m[1][2]}, + [3]float64{f * m[2][0], f * m[2][1], f * m[2][2]}, + } +} + +// mul returns the multiplication of m by the Point p and converts the +// resulting 1x3 matrix into a Point. +func (m *matrix3x3) mul(p Point) Point { + return Point{r3.Vector{ + m[0][0]*p.X + m[0][1]*p.Y + m[0][2]*p.Z, + m[1][0]*p.X + m[1][1]*p.Y + m[1][2]*p.Z, + m[2][0]*p.X + m[2][1]*p.Y + m[2][2]*p.Z, + }} +} + +// det returns the determinant of this matrix. +func (m *matrix3x3) det() float64 { + // | a b c | + // det | d e f | = aei + bfg + cdh - ceg - bdi - afh + // | g h i | + return m[0][0]*m[1][1]*m[2][2] + m[0][1]*m[1][2]*m[2][0] + m[0][2]*m[1][0]*m[2][1] - + m[0][2]*m[1][1]*m[2][0] - m[0][1]*m[1][0]*m[2][2] - m[0][0]*m[1][2]*m[2][1] +} + +// transpose reflects the matrix along its diagonal and returns the result. +func (m *matrix3x3) transpose() *matrix3x3 { + m[0][1], m[1][0] = m[1][0], m[0][1] + m[0][2], m[2][0] = m[2][0], m[0][2] + m[1][2], m[2][1] = m[2][1], m[1][2] + + return m +} + +// String formats the matrix into an easier to read layout. 
+func (m *matrix3x3) String() string { + return fmt.Sprintf("[ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ]", + m[0][0], m[0][1], m[0][2], + m[1][0], m[1][1], m[1][2], + m[2][0], m[2][1], m[2][2], + ) +} + +// getFrame returns the orthonormal frame for the given point on the unit sphere. +func getFrame(p Point) matrix3x3 { + // Given the point p on the unit sphere, extend this into a right-handed + // coordinate frame of unit-length column vectors m = (x,y,z). Note that + // the vectors (x,y) are an orthonormal frame for the tangent space at point p, + // while p itself is an orthonormal frame for the normal space at p. + m := matrix3x3{} + m.setCol(2, p) + m.setCol(1, Point{p.Ortho()}) + m.setCol(0, Point{m.col(1).Cross(p.Vector)}) + return m +} + +// toFrame returns the coordinates of the given point with respect to its orthonormal basis m. +// The resulting point q satisfies the identity (m * q == p). +func toFrame(m matrix3x3, p Point) Point { + // The inverse of an orthonormal matrix is its transpose. + return m.transpose().mul(p) +} + +// fromFrame returns the coordinates of the given point in standard axis-aligned basis +// from its orthonormal basis m. +// The resulting point p satisfies the identity (p == m * q). +func fromFrame(m matrix3x3, q Point) Point { + return m.mul(q) +} diff --git a/vendor/github.com/blevesearch/geo/s2/max_distance_targets.go b/vendor/github.com/blevesearch/geo/s2/max_distance_targets.go new file mode 100644 index 0000000..92e916d --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/max_distance_targets.go @@ -0,0 +1,306 @@ +// Copyright 2019 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/s1" +) + +// maxDistance implements distance as the supplementary distance (Pi - x) to find +// results that are the furthest using the distance related algorithms. +type maxDistance s1.ChordAngle + +func (m maxDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) } +func (m maxDistance) zero() distance { return maxDistance(s1.StraightChordAngle) } +func (m maxDistance) negative() distance { return maxDistance(s1.InfChordAngle()) } +func (m maxDistance) infinity() distance { return maxDistance(s1.NegativeChordAngle) } +func (m maxDistance) less(other distance) bool { return m.chordAngle() > other.chordAngle() } +func (m maxDistance) sub(other distance) distance { + return maxDistance(m.chordAngle() + other.chordAngle()) +} +func (m maxDistance) chordAngleBound() s1.ChordAngle { + return s1.StraightChordAngle - m.chordAngle() +} +func (m maxDistance) updateDistance(dist distance) (distance, bool) { + if dist.less(m) { + m = maxDistance(dist.chordAngle()) + return m, true + } + return m, false +} + +func (m maxDistance) fromChordAngle(o s1.ChordAngle) distance { + return maxDistance(o) +} + +// MaxDistanceToPointTarget is used for computing the maximum distance to a Point. 
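+//
+// A minimal usage sketch (an illustration, not upstream code; it assumes the
+// package's Polyline and EdgeQuery APIs, e.g. NewFurthestEdgeQuery as used
+// further below in this file):
+//
+//	index := NewShapeIndex()
+//	index.Add(PolylineFromLatLngs([]LatLng{
+//		LatLngFromDegrees(0, 0), LatLngFromDegrees(0, 10),
+//	}))
+//	query := NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions())
+//	target := NewMaxDistanceToPointTarget(PointFromLatLng(LatLngFromDegrees(30, 5)))
+//	angle := query.Distance(target).Angle() // distance to the furthest edge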
+type MaxDistanceToPointTarget struct { + point Point + dist distance +} + +// NewMaxDistanceToPointTarget returns a new target for the given Point. +func NewMaxDistanceToPointTarget(point Point) *MaxDistanceToPointTarget { + m := maxDistance(0) + return &MaxDistanceToPointTarget{point: point, dist: &m} +} + +func (m *MaxDistanceToPointTarget) capBound() Cap { + return CapFromCenterChordAngle(Point{m.point.Mul(-1)}, (s1.ChordAngle(0))) +} + +func (m *MaxDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(ChordAngleBetweenPoints(p, m.point))) +} + +func (m *MaxDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + if d, ok := UpdateMaxDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(maxDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MaxDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(cell.MaxDistance(m.point))) +} + +func (m *MaxDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // For furthest points, we visit the polygons whose interior contains + // the antipode of the target point. These are the polygons whose + // distance to the target is maxDistance.zero() + q := NewContainsPointQuery(index, VertexModelSemiOpen) + return q.visitContainingShapes(Point{m.point.Mul(-1)}, func(shape Shape) bool { + return v(shape, m.point) + }) +} + +func (m *MaxDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MaxDistanceToPointTarget) maxBruteForceIndexSize() int { return 30 } +func (m *MaxDistanceToPointTarget) distance() distance { return m.dist } + +// MaxDistanceToEdgeTarget is used for computing the maximum distance to an Edge. +type MaxDistanceToEdgeTarget struct { + e Edge + dist distance +} + +// NewMaxDistanceToEdgeTarget returns a new target for the given Edge. +func NewMaxDistanceToEdgeTarget(e Edge) *MaxDistanceToEdgeTarget { + m := maxDistance(0) + return &MaxDistanceToEdgeTarget{e: e, dist: m} +} + +// capBound returns a Cap that bounds the antipode of the target. (This +// is the set of points whose maxDistance to the target is maxDistance.zero) +func (m *MaxDistanceToEdgeTarget) capBound() Cap { + // The following computes a radius equal to half the edge length in an + // efficient and numerically stable way. 
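+	// A note on the formula below: with d2 = |V0-V1|^2 = 2 - 2*cos(t), the
+	// squared chord length for the half angle t/2 is
+	//
+	//	r2 = 2 - 2*cos(t/2) = 2 - 2*sqrt(1 - d2/4)
+	//
+	// and rationalizing yields r2 = (0.5*d2) / (1 + sqrt(1 - 0.25*d2)), which
+	// avoids the cancellation the direct form suffers when d2 is small.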
+ d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1)) + r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2)) + return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Mul(-1).Normalize()}, s1.ChordAngleFromSquaredLength(r2)) +} + +func (m *MaxDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + if d, ok := UpdateMaxDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(maxDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MaxDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + if d, ok := updateEdgePairMaxDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(maxDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MaxDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(cell.MaxDistanceToEdge(m.e.V0, m.e.V1))) +} + +func (m *MaxDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // We only need to test one edge point. That is because the method *must* + // visit a polygon if it fully contains the target, and *is allowed* to + // visit a polygon if it intersects the target. If the tested vertex is not + // contained, we know the full edge is not contained; if the tested vertex is + // contained, then the edge either is fully contained (must be visited) or it + // intersects (is allowed to be visited). We visit the center of the edge so + // that edge AB gives identical results to BA. + target := NewMaxDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}) + return target.visitContainingShapes(index, v) +} + +func (m *MaxDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MaxDistanceToEdgeTarget) maxBruteForceIndexSize() int { return 30 } +func (m *MaxDistanceToEdgeTarget) distance() distance { return m.dist } + +// MaxDistanceToCellTarget is used for computing the maximum distance to a Cell. +type MaxDistanceToCellTarget struct { + cell Cell + dist distance +} + +// NewMaxDistanceToCellTarget returns a new target for the given Cell. +func NewMaxDistanceToCellTarget(cell Cell) *MaxDistanceToCellTarget { + m := maxDistance(0) + return &MaxDistanceToCellTarget{cell: cell, dist: m} +} + +func (m *MaxDistanceToCellTarget) capBound() Cap { + c := m.cell.CapBound() + return CapFromCenterAngle(Point{c.Center().Mul(-1)}, c.Radius()) +} + +func (m *MaxDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(m.cell.MaxDistance(p))) +} + +func (m *MaxDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(m.cell.MaxDistanceToEdge(edge.V0, edge.V1))) +} + +func (m *MaxDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(m.cell.MaxDistanceToCell(cell))) +} + +func (m *MaxDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // We only need to check one point here - cell center is simplest. + // See comment at MaxDistanceToEdgeTarget's visitContainingShapes. 
+ target := NewMaxDistanceToPointTarget(m.cell.Center()) + return target.visitContainingShapes(index, v) +} + +func (m *MaxDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MaxDistanceToCellTarget) maxBruteForceIndexSize() int { return 30 } +func (m *MaxDistanceToCellTarget) distance() distance { return m.dist } + +// MaxDistanceToShapeIndexTarget is used for computing the maximum distance to a ShapeIndex. +type MaxDistanceToShapeIndexTarget struct { + index *ShapeIndex + query *EdgeQuery + dist distance +} + +// NewMaxDistanceToShapeIndexTarget returns a new target for the given ShapeIndex. +func NewMaxDistanceToShapeIndexTarget(index *ShapeIndex) *MaxDistanceToShapeIndexTarget { + m := maxDistance(0) + return &MaxDistanceToShapeIndexTarget{ + index: index, + dist: m, + query: NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions()), + } +} + +// capBound returns a Cap that bounds the antipode of the target. This +// is the set of points whose maxDistance to the target is maxDistance.zero() +func (m *MaxDistanceToShapeIndexTarget) capBound() Cap { + // TODO(roberts): Depends on ShapeIndexRegion + // c := makeShapeIndexRegion(m.index).CapBound() + // return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius()) + panic("not implemented yet") +} + +func (m *MaxDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + m.query.opts.distanceLimit = dist.chordAngle() + target := NewMaxDistanceToPointTarget(p) + r := m.query.findEdge(target, m.query.opts) + if r.shapeID < 0 { + return dist, false + } + return r.distance, true +} + +func (m *MaxDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + m.query.opts.distanceLimit = dist.chordAngle() + target := NewMaxDistanceToEdgeTarget(edge) + r := m.query.findEdge(target, m.query.opts) + if r.shapeID < 0 { + return dist, false + } + return r.distance, true +} + +func (m *MaxDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + m.query.opts.distanceLimit = dist.chordAngle() + target := NewMaxDistanceToCellTarget(cell) + r := m.query.findEdge(target, m.query.opts) + if r.shapeID < 0 { + return dist, false + } + return r.distance, true +} + +// visitContainingShapes returns the polygons containing the antipodal +// reflection of *any* connected component for target types consisting of +// multiple connected components. It is sufficient to test containment of +// one vertex per connected component, since this allows us to also return +// any polygon whose boundary has distance.zero() to the target. +func (m *MaxDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // It is sufficient to find the set of chain starts in the target index + // (i.e., one vertex per connected component of edges) that are contained by + // the query index, except for one special case to handle full polygons. + // + // TODO(roberts): Do this by merge-joining the two ShapeIndexes and share + // the code with BooleanOperation. + for _, shape := range m.index.shapes { + numChains := shape.NumChains() + // Shapes that don't have any edges require a special case (below). 
+		testedPoint := false
+		for c := 0; c < numChains; c++ {
+			chain := shape.Chain(c)
+			if chain.Length == 0 {
+				continue
+			}
+			testedPoint = true
+			target := NewMaxDistanceToPointTarget(shape.ChainEdge(c, 0).V0)
+			if !target.visitContainingShapes(index, v) {
+				return false
+			}
+		}
+		if !testedPoint {
+			// Special case to handle full polygons.
+			ref := shape.ReferencePoint()
+			if !ref.Contained {
+				continue
+			}
+			target := NewMaxDistanceToPointTarget(ref.Point)
+			if !target.visitContainingShapes(index, v) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func (m *MaxDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool {
+	m.query.opts.maxError = maxErr
+	return true
+}
+func (m *MaxDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 30 }
+func (m *MaxDistanceToShapeIndexTarget) distance() distance         { return m.dist }
+func (m *MaxDistanceToShapeIndexTarget) setIncludeInteriors(b bool) {
+	m.query.opts.includeInteriors = b
+}
+func (m *MaxDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b }
+
+// TODO(roberts): Remaining methods
+//
+// func (m *MaxDistanceToShapeIndexTarget) capBound() Cap {
+// CellUnionTarget
diff --git a/vendor/github.com/blevesearch/geo/s2/metric.go b/vendor/github.com/blevesearch/geo/s2/metric.go
new file mode 100644
index 0000000..53db3d3
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/metric.go
@@ -0,0 +1,164 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file implements functions for various S2 measurements.
+
+import "math"
+
+// A Metric is a measure for cells. It is used to describe the shape and size
+// of cells. Metrics are useful for deciding which cell level to use in order to
+// satisfy a given condition (e.g. that cell vertices must be no further than
+// "x" apart). You can use the Value(level) method to compute the corresponding
+// length or area on the unit sphere for cells at a given level. The minimum
+// and maximum bounds are valid for cells at all levels, but they may be
+// somewhat conservative for very large cells (e.g. face cells).
+type Metric struct {
+	// Dim is either 1 or 2, for a 1D or 2D metric respectively.
+	Dim int
+	// Deriv is the scaling factor for the metric.
+	Deriv float64
+}
+
+// Defined metrics.
+// Of the projection methods defined in C++, Go only supports the quadratic projection.
+
+// Each cell is bounded by four planes passing through its four edges and
+// the center of the sphere. These metrics relate to the angle between each
+// pair of opposite bounding planes, or equivalently, between the planes
+// corresponding to two different s-values or two different t-values.
+var (
+	MinAngleSpanMetric = Metric{1, 4.0 / 3}
+	AvgAngleSpanMetric = Metric{1, math.Pi / 2}
+	MaxAngleSpanMetric = Metric{1, 1.704897179199218452}
+)
+
+// The width of a geometric figure is defined as the distance between two
+// parallel bounding lines in a given direction.
For cells, the minimum +// width is always attained between two opposite edges, and the maximum +// width is attained between two opposite vertices. However, for our +// purposes we redefine the width of a cell as the perpendicular distance +// between a pair of opposite edges. A cell therefore has two widths, one +// in each direction. The minimum width according to this definition agrees +// with the classic geometric one, but the maximum width is different. (The +// maximum geometric width corresponds to MaxDiag defined below.) +// +// The average width in both directions for all cells at level k is approximately +// AvgWidthMetric.Value(k). +// +// The width is useful for bounding the minimum or maximum distance from a +// point on one edge of a cell to the closest point on the opposite edge. +// For example, this is useful when growing regions by a fixed distance. +var ( + MinWidthMetric = Metric{1, 2 * math.Sqrt2 / 3} + AvgWidthMetric = Metric{1, 1.434523672886099389} + MaxWidthMetric = Metric{1, MaxAngleSpanMetric.Deriv} +) + +// The edge length metrics can be used to bound the minimum, maximum, +// or average distance from the center of one cell to the center of one of +// its edge neighbors. In particular, it can be used to bound the distance +// between adjacent cell centers along the space-filling Hilbert curve for +// cells at any given level. +var ( + MinEdgeMetric = Metric{1, 2 * math.Sqrt2 / 3} + AvgEdgeMetric = Metric{1, 1.459213746386106062} + MaxEdgeMetric = Metric{1, MaxAngleSpanMetric.Deriv} + + // MaxEdgeAspect is the maximum edge aspect ratio over all cells at any level, + // where the edge aspect ratio of a cell is defined as the ratio of its longest + // edge length to its shortest edge length. + MaxEdgeAspect = 1.442615274452682920 + + MinAreaMetric = Metric{2, 8 * math.Sqrt2 / 9} + AvgAreaMetric = Metric{2, 4 * math.Pi / 6} + MaxAreaMetric = Metric{2, 2.635799256963161491} +) + +// The maximum diagonal is also the maximum diameter of any cell, +// and also the maximum geometric width (see the comment for widths). For +// example, the distance from an arbitrary point to the closest cell center +// at a given level is at most half the maximum diagonal length. +var ( + MinDiagMetric = Metric{1, 8 * math.Sqrt2 / 9} + AvgDiagMetric = Metric{1, 2.060422738998471683} + MaxDiagMetric = Metric{1, 2.438654594434021032} + + // MaxDiagAspect is the maximum diagonal aspect ratio over all cells at any + // level, where the diagonal aspect ratio of a cell is defined as the ratio + // of its longest diagonal length to its shortest diagonal length. + MaxDiagAspect = math.Sqrt(3) +) + +// Value returns the value of the metric at the given level. +func (m Metric) Value(level int) float64 { + return math.Ldexp(m.Deriv, -m.Dim*level) +} + +// MinLevel returns the minimum level such that the metric is at most +// the given value, or maxLevel (30) if there is no such level. +// +// For example, MinLevel(0.1) returns the minimum level such that all cell diagonal +// lengths are 0.1 or smaller. The returned value is always a valid level. +// +// In C++, this is called GetLevelForMaxValue. +func (m Metric) MinLevel(val float64) int { + if val < 0 { + return maxLevel + } + + level := -(math.Ilogb(val/m.Deriv) >> uint(m.Dim-1)) + if level > maxLevel { + level = maxLevel + } + if level < 0 { + level = 0 + } + return level +} + +// MaxLevel returns the maximum level such that the metric is at least +// the given value, or zero if there is no such level. 
+//
+// For example, MaxLevel(0.1) returns the maximum level such that all cells have a
+// minimum width of 0.1 or larger. The returned value is always a valid level.
+//
+// In C++, this is called GetLevelForMinValue.
+func (m Metric) MaxLevel(val float64) int {
+	if val <= 0 {
+		return maxLevel
+	}
+
+	level := math.Ilogb(m.Deriv/val) >> uint(m.Dim-1)
+	if level > maxLevel {
+		level = maxLevel
+	}
+	if level < 0 {
+		level = 0
+	}
+	return level
+}
+
+// ClosestLevel returns the level at which the metric has approximately the given
+// value. The return value is always a valid level. For example,
+// AvgEdgeMetric.ClosestLevel(0.1) returns the level at which the average cell edge
+// length is approximately 0.1.
+func (m Metric) ClosestLevel(val float64) int {
+	x := math.Sqrt2
+	if m.Dim == 2 {
+		x = 2
+	}
+	return m.MinLevel(x * val)
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/min_distance_targets.go b/vendor/github.com/blevesearch/geo/s2/min_distance_targets.go
new file mode 100644
index 0000000..b4cbd43
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/min_distance_targets.go
@@ -0,0 +1,362 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+
+	"github.com/golang/geo/s1"
+)
+
+// minDistance implements the distance interface to find the closest distance
+// to targets.
+type minDistance s1.ChordAngle
+
+func (m minDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) }
+func (m minDistance) zero() distance            { return minDistance(0) }
+func (m minDistance) negative() distance        { return minDistance(s1.NegativeChordAngle) }
+func (m minDistance) infinity() distance        { return minDistance(s1.InfChordAngle()) }
+func (m minDistance) less(other distance) bool  { return m.chordAngle() < other.chordAngle() }
+func (m minDistance) sub(other distance) distance {
+	return minDistance(m.chordAngle() - other.chordAngle())
+}
+func (m minDistance) chordAngleBound() s1.ChordAngle {
+	return m.chordAngle().Expanded(m.chordAngle().MaxAngleError())
+}
+
+// updateDistance updates its own value if the other value is less() than it is,
+// and reports if it updated.
+func (m minDistance) updateDistance(dist distance) (distance, bool) {
+	if dist.less(m) {
+		m = minDistance(dist.chordAngle())
+		return m, true
+	}
+	return m, false
+}
+
+func (m minDistance) fromChordAngle(o s1.ChordAngle) distance {
+	return minDistance(o)
+}
+
+// MinDistanceToPointTarget is a type for computing the minimum distance to a Point.
+type MinDistanceToPointTarget struct {
+	point Point
+	dist  distance
+}
+
+// NewMinDistanceToPointTarget returns a new target for the given Point.
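+//
+// A minimal usage sketch (an illustration, not upstream code; index is
+// assumed to be a populated ShapeIndex):
+//
+//	query := NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions())
+//	target := NewMinDistanceToPointTarget(PointFromLatLng(LatLngFromDegrees(30, 5)))
+//	if query.IsDistanceLess(target, s1.ChordAngleFromAngle(s1.Degree)) {
+//		// some edge of the index lies within one degree of the point
+//	}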
+func NewMinDistanceToPointTarget(point Point) *MinDistanceToPointTarget {
+	m := minDistance(0)
+	return &MinDistanceToPointTarget{point: point, dist: &m}
+}
+
+func (m *MinDistanceToPointTarget) capBound() Cap {
+	return CapFromCenterChordAngle(m.point, s1.ChordAngle(0))
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	var ok bool
+	dist, ok = dist.updateDistance(minDistance(ChordAngleBetweenPoints(p, m.point)))
+	return dist, ok
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	if d, ok := UpdateMinDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok {
+		dist, _ = dist.updateDistance(minDistance(d))
+		return dist, true
+	}
+	return dist, false
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	var ok bool
+	dist, ok = dist.updateDistance(minDistance(cell.Distance(m.point)))
+	return dist, ok
+}
+
+func (m *MinDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// For closest points, we visit the polygons whose interior contains the
+	// target point itself; these are the polygons whose distance to the
+	// target is minDistance.zero().
+	q := NewContainsPointQuery(index, VertexModelSemiOpen)
+	return q.visitContainingShapes(m.point, func(shape Shape) bool {
+		return v(shape, m.point)
+	})
+}
+
+func (m *MinDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MinDistanceToPointTarget) maxBruteForceIndexSize() int           { return 30 }
+func (m *MinDistanceToPointTarget) distance() distance                    { return m.dist }
+
+// ----------------------------------------------------------
+
+// MinDistanceToEdgeTarget is a type for computing the minimum distance to an Edge.
+type MinDistanceToEdgeTarget struct {
+	e    Edge
+	dist distance
+}
+
+// NewMinDistanceToEdgeTarget returns a new target for the given Edge.
+func NewMinDistanceToEdgeTarget(e Edge) *MinDistanceToEdgeTarget {
+	m := minDistance(0)
+	return &MinDistanceToEdgeTarget{e: e, dist: m}
+}
+
+// capBound returns a Cap that bounds the target. (This is the set of points
+// whose minDistance to the target is minDistance.zero().)
+func (m *MinDistanceToEdgeTarget) capBound() Cap {
+	// The following computes a radius equal to half the edge length in an
+	// efficient and numerically stable way.
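+	// (The same half-angle rationalization sketched in
+	// MaxDistanceToEdgeTarget.capBound applies to the formula below.)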
+ d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1)) + r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2)) + return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}, s1.ChordAngleFromSquaredLength(r2)) +} + +func (m *MinDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + if d, ok := UpdateMinDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(minDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MinDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + if d, ok := updateEdgePairMinDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(minDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MinDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(minDistance(cell.DistanceToEdge(m.e.V0, m.e.V1))) +} + +func (m *MinDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // We test the center of the edge in order to ensure that edge targets AB + // and BA yield identical results (which is not guaranteed by the API but + // users might expect). Other options would be to test both endpoints, or + // return different results for AB and BA in some cases. + target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}) + return target.visitContainingShapes(index, v) +} + +func (m *MinDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MinDistanceToEdgeTarget) maxBruteForceIndexSize() int { return 30 } +func (m *MinDistanceToEdgeTarget) distance() distance { return m.dist } + +// ---------------------------------------------------------- + +// MinDistanceToCellTarget is a type for computing the minimum distance to a Cell. +type MinDistanceToCellTarget struct { + cell Cell + dist distance +} + +// NewMinDistanceToCellTarget returns a new target for the given Cell. +func NewMinDistanceToCellTarget(cell Cell) *MinDistanceToCellTarget { + m := minDistance(0) + return &MinDistanceToCellTarget{cell: cell, dist: m} +} + +func (m *MinDistanceToCellTarget) capBound() Cap { + return m.cell.CapBound() +} + +func (m *MinDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + return dist.updateDistance(minDistance(m.cell.Distance(p))) +} + +func (m *MinDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + return dist.updateDistance(minDistance(m.cell.DistanceToEdge(edge.V0, edge.V1))) +} + +func (m *MinDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(minDistance(m.cell.DistanceToCell(cell))) +} + +func (m *MinDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // The simplest approach is simply to return the polygons that contain the + // cell center. Alternatively, if the index cell is smaller than the target + // cell then we could return all polygons that are present in the + // shapeIndexCell, but since the index is built conservatively this may + // include some polygons that don't quite intersect the cell. So we would + // either need to recheck for intersection more accurately, or weaken the + // VisitContainingShapes contract so that it only guarantees approximate + // intersection, neither of which seems like a good tradeoff. 
+ target := NewMinDistanceToPointTarget(m.cell.Center()) + return target.visitContainingShapes(index, v) +} +func (m *MinDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MinDistanceToCellTarget) maxBruteForceIndexSize() int { return 30 } +func (m *MinDistanceToCellTarget) distance() distance { return m.dist } + +// ---------------------------------------------------------- + +/* +// MinDistanceToCellUnionTarget is a type for computing the minimum distance to a CellUnion. +type MinDistanceToCellUnionTarget struct { + cu CellUnion + query *ClosestCellQuery + dist distance +} + +// NewMinDistanceToCellUnionTarget returns a new target for the given CellUnion. +func NewMinDistanceToCellUnionTarget(cu CellUnion) *MinDistanceToCellUnionTarget { + m := minDistance(0) + return &MinDistanceToCellUnionTarget{cu: cu, dist: m} +} + +func (m *MinDistanceToCellUnionTarget) capBound() Cap { + return m.cu.CapBound() +} + +func (m *MinDistanceToCellUnionTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + m.query.opts.DistanceLimit = dist.chordAngle() + target := NewMinDistanceToPointTarget(p) + r := m.query.findEdge(target) + if r.ShapeID < 0 { + return dist, false + } + return minDistance(r.Distance), true +} + +func (m *MinDistanceToCellUnionTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // We test the center of the edge in order to ensure that edge targets AB + // and BA yield identical results (which is not guaranteed by the API but + // users might expect). Other options would be to test both endpoints, or + // return different results for AB and BA in some cases. + target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}) + return target.visitContainingShapes(index, v) +} +func (m *MinDistanceToCellUnionTarget) setMaxError(maxErr s1.ChordAngle) bool { + m.query.opts.MaxError = maxErr + return true +} +func (m *MinDistanceToCellUnionTarget) maxBruteForceIndexSize() int { return 30 } +func (m *MinDistanceToCellUnionTarget) distance() distance { return m.dist } +*/ + +// ---------------------------------------------------------- + +// MinDistanceToShapeIndexTarget is a type for computing the minimum distance to a ShapeIndex. +type MinDistanceToShapeIndexTarget struct { + index *ShapeIndex + query *EdgeQuery + dist distance +} + +// NewMinDistanceToShapeIndexTarget returns a new target for the given ShapeIndex. +func NewMinDistanceToShapeIndexTarget(index *ShapeIndex) *MinDistanceToShapeIndexTarget { + m := minDistance(0) + return &MinDistanceToShapeIndexTarget{ + index: index, + dist: m, + query: NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions()), + } +} + +func (m *MinDistanceToShapeIndexTarget) capBound() Cap { + // TODO(roberts): Depends on ShapeIndexRegion existing. 
+ // c := makeS2ShapeIndexRegion(m.index).CapBound() + // return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius()) + panic("not implemented yet") +} + +func (m *MinDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + m.query.opts.distanceLimit = dist.chordAngle() + target := NewMinDistanceToPointTarget(p) + r := m.query.findEdge(target, m.query.opts) + if r.shapeID < 0 { + return dist, false + } + return r.distance, true +} + +func (m *MinDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + m.query.opts.distanceLimit = dist.chordAngle() + target := NewMinDistanceToEdgeTarget(edge) + r := m.query.findEdge(target, m.query.opts) + if r.shapeID < 0 { + return dist, false + } + return r.distance, true +} + +func (m *MinDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + m.query.opts.distanceLimit = dist.chordAngle() + target := NewMinDistanceToCellTarget(cell) + r := m.query.findEdge(target, m.query.opts) + if r.shapeID < 0 { + return dist, false + } + return r.distance, true +} + +// For target types consisting of multiple connected components (such as this one), +// this method should return the polygons containing the antipodal reflection of +// *any* connected component. (It is sufficient to test containment of one vertex per +// connected component, since this allows us to also return any polygon whose +// boundary has distance.zero() to the target.) +func (m *MinDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // It is sufficient to find the set of chain starts in the target index + // (i.e., one vertex per connected component of edges) that are contained by + // the query index, except for one special case to handle full polygons. + // + // TODO(roberts): Do this by merge-joining the two ShapeIndexes. + for _, shape := range m.index.shapes { + numChains := shape.NumChains() + // Shapes that don't have any edges require a special case (below). + testedPoint := false + for c := 0; c < numChains; c++ { + chain := shape.Chain(c) + if chain.Length == 0 { + continue + } + testedPoint = true + target := NewMinDistanceToPointTarget(shape.ChainEdge(c, 0).V0) + if !target.visitContainingShapes(index, v) { + return false + } + } + if !testedPoint { + // Special case to handle full polygons. + ref := shape.ReferencePoint() + if !ref.Contained { + continue + } + target := NewMinDistanceToPointTarget(ref.Point) + if !target.visitContainingShapes(index, v) { + return false + } + } + } + return true +} + +func (m *MinDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool { + m.query.opts.maxError = maxErr + return true +} +func (m *MinDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 25 } +func (m *MinDistanceToShapeIndexTarget) distance() distance { return m.dist } +func (m *MinDistanceToShapeIndexTarget) setIncludeInteriors(b bool) { + m.query.opts.includeInteriors = b +} +func (m *MinDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b } + +// TODO(roberts): Remaining methods +// +// func (m *MinDistanceToShapeIndexTarget) capBound() Cap { +// CellUnionTarget diff --git a/vendor/github.com/blevesearch/geo/s2/nthderivative.go b/vendor/github.com/blevesearch/geo/s2/nthderivative.go new file mode 100644 index 0000000..73445d6 --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/nthderivative.go @@ -0,0 +1,88 @@ +// Copyright 2017 Google Inc. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +// nthDerivativeCoder provides Nth Derivative Coding. +// (In signal processing disciplines, this is known as N-th Delta Coding.) +// +// Good for varint coding integer sequences with polynomial trends. +// +// Instead of coding a sequence of values directly, code its nth-order discrete +// derivative. Overflow in integer addition and subtraction makes this a +// lossless transform. +// +// constant linear quadratic +// trend trend trend +// / \ / \ / \_ +// input |0 0 0 0 1 2 3 4 9 16 25 36 +// 0th derivative(identity) |0 0 0 0 1 2 3 4 9 16 25 36 +// 1st derivative(delta coding) | 0 0 0 1 1 1 1 5 7 9 11 +// 2nd derivative(linear prediction) | 0 0 1 0 0 0 4 2 2 2 +// ------------------------------------- +// 0 1 2 3 4 5 6 7 8 9 10 11 +// n in sequence +// +// Higher-order codings can break even or be detrimental on other sequences. +// +// random oscillating +// / \ / \_ +// input |5 9 6 1 8 8 2 -2 4 -4 6 -6 +// 0th derivative(identity) |5 9 6 1 8 8 2 -2 4 -4 6 -6 +// 1st derivative(delta coding) | 4 -3 -5 7 0 -6 -4 6 -8 10 -12 +// 2nd derivative(linear prediction) | -7 -2 12 -7 -6 2 10 -14 18 -22 +// --------------------------------------- +// 0 1 2 3 4 5 6 7 8 9 10 11 +// n in sequence +// +// Note that the nth derivative isn't available until sequence item n. Earlier +// values are coded at lower order. For the above table, read 5 4 -7 -2 12 ... +type nthDerivativeCoder struct { + n, m int + memory [10]int32 +} + +// newNthDerivativeCoder returns a new coder, where n is the derivative order of the encoder (the N in NthDerivative). +// n must be within [0,10]. +func newNthDerivativeCoder(n int) *nthDerivativeCoder { + c := &nthDerivativeCoder{n: n} + if n < 0 || n > len(c.memory) { + panic("unsupported n. Must be within [0,10].") + } + return c +} + +func (c *nthDerivativeCoder) encode(k int32) int32 { + for i := 0; i < c.m; i++ { + delta := k - c.memory[i] + c.memory[i] = k + k = delta + } + if c.m < c.n { + c.memory[c.m] = k + c.m++ + } + return k +} + +func (c *nthDerivativeCoder) decode(k int32) int32 { + if c.m < c.n { + c.m++ + } + for i := c.m - 1; i >= 0; i-- { + c.memory[i] += k + k = c.memory[i] + } + return k +} diff --git a/vendor/github.com/blevesearch/geo/s2/paddedcell.go b/vendor/github.com/blevesearch/geo/s2/paddedcell.go new file mode 100644 index 0000000..ac304a6 --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/paddedcell.go @@ -0,0 +1,252 @@ +// Copyright 2016 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "github.com/golang/geo/r1" + "github.com/golang/geo/r2" +) + +// PaddedCell represents a Cell whose (u,v)-range has been expanded on +// all sides by a given amount of "padding". Unlike Cell, its methods and +// representation are optimized for clipping edges against Cell boundaries +// to determine which cells are intersected by a given set of edges. +type PaddedCell struct { + id CellID + padding float64 + bound r2.Rect + middle r2.Rect // A rect in (u, v)-space that belongs to all four children. + iLo, jLo int // Minimum (i,j)-coordinates of this cell before padding + orientation int // Hilbert curve orientation of this cell. + level int +} + +// PaddedCellFromCellID constructs a padded cell with the given padding. +func PaddedCellFromCellID(id CellID, padding float64) *PaddedCell { + p := &PaddedCell{ + id: id, + padding: padding, + middle: r2.EmptyRect(), + } + + // Fast path for constructing a top-level face (the most common case). + if id.isFace() { + limit := padding + 1 + p.bound = r2.Rect{r1.Interval{-limit, limit}, r1.Interval{-limit, limit}} + p.middle = r2.Rect{r1.Interval{-padding, padding}, r1.Interval{-padding, padding}} + p.orientation = id.Face() & 1 + return p + } + + _, p.iLo, p.jLo, p.orientation = id.faceIJOrientation() + p.level = id.Level() + p.bound = ijLevelToBoundUV(p.iLo, p.jLo, p.level).ExpandedByMargin(padding) + ijSize := sizeIJ(p.level) + p.iLo &= -ijSize + p.jLo &= -ijSize + + return p +} + +// PaddedCellFromParentIJ constructs the child of parent with the given (i,j) index. +// The four child cells have indices of (0,0), (0,1), (1,0), (1,1), where the i and j +// indices correspond to increasing u- and v-values respectively. +func PaddedCellFromParentIJ(parent *PaddedCell, i, j int) *PaddedCell { + // Compute the position and orientation of the child incrementally from the + // orientation of the parent. + pos := ijToPos[parent.orientation][2*i+j] + + p := &PaddedCell{ + id: parent.id.Children()[pos], + padding: parent.padding, + bound: parent.bound, + orientation: parent.orientation ^ posToOrientation[pos], + level: parent.level + 1, + middle: r2.EmptyRect(), + } + + ijSize := sizeIJ(p.level) + p.iLo = parent.iLo + i*ijSize + p.jLo = parent.jLo + j*ijSize + + // For each child, one corner of the bound is taken directly from the parent + // while the diagonally opposite corner is taken from middle(). + middle := parent.Middle() + if i == 1 { + p.bound.X.Lo = middle.X.Lo + } else { + p.bound.X.Hi = middle.X.Hi + } + if j == 1 { + p.bound.Y.Lo = middle.Y.Lo + } else { + p.bound.Y.Hi = middle.Y.Hi + } + + return p +} + +// CellID returns the CellID this padded cell represents. +func (p PaddedCell) CellID() CellID { + return p.id +} + +// Padding returns the amount of padding on this cell. +func (p PaddedCell) Padding() float64 { + return p.padding +} + +// Level returns the level this cell is at. +func (p PaddedCell) Level() int { + return p.level +} + +// Center returns the center of this cell. +func (p PaddedCell) Center() Point { + ijSize := sizeIJ(p.level) + si := uint32(2*p.iLo + ijSize) + ti := uint32(2*p.jLo + ijSize) + return Point{faceSiTiToXYZ(p.id.Face(), si, ti).Normalize()} +} + +// Middle returns the rectangle in the middle of this cell that belongs to +// all four of its children in (u,v)-space. 
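+// For a face cell constructed with padding p, for example, this is the square
+// [-p, p] x [-p, p] (see the fast path in PaddedCellFromCellID above).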
+func (p *PaddedCell) Middle() r2.Rect {
+	// We compute this field lazily because it is not needed the majority of the
+	// time (i.e., for cells where the recursion terminates).
+	if p.middle.IsEmpty() {
+		ijSize := sizeIJ(p.level)
+		u := stToUV(siTiToST(uint32(2*p.iLo + ijSize)))
+		v := stToUV(siTiToST(uint32(2*p.jLo + ijSize)))
+		p.middle = r2.Rect{
+			r1.Interval{u - p.padding, u + p.padding},
+			r1.Interval{v - p.padding, v + p.padding},
+		}
+	}
+	return p.middle
+}
+
+// Bound returns the bounds for this cell in (u,v)-space including padding.
+func (p PaddedCell) Bound() r2.Rect {
+	return p.bound
+}
+
+// ChildIJ returns the (i,j) coordinates for the child cell at the given traversal
+// position. The traversal position corresponds to the order in which child
+// cells are visited by the Hilbert curve.
+func (p PaddedCell) ChildIJ(pos int) (i, j int) {
+	ij := posToIJ[p.orientation][pos]
+	return ij >> 1, ij & 1
+}
+
+// EntryVertex returns the vertex where the space-filling curve enters this cell.
+func (p PaddedCell) EntryVertex() Point {
+	// The curve enters at the (0,0) vertex unless the axis directions are
+	// reversed, in which case it enters at the (1,1) vertex.
+	i := p.iLo
+	j := p.jLo
+	if p.orientation&invertMask != 0 {
+		ijSize := sizeIJ(p.level)
+		i += ijSize
+		j += ijSize
+	}
+	return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
+}
+
+// ExitVertex returns the vertex where the space-filling curve exits this cell.
+func (p PaddedCell) ExitVertex() Point {
+	// The curve exits at the (1,0) vertex unless the axes are swapped or
+	// inverted but not both, in which case it exits at the (0,1) vertex.
+	i := p.iLo
+	j := p.jLo
+	ijSize := sizeIJ(p.level)
+	if p.orientation == 0 || p.orientation == swapMask+invertMask {
+		i += ijSize
+	} else {
+		j += ijSize
+	}
+	return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
+}
+
+// ShrinkToFit returns the smallest CellID that contains all descendants of this
+// padded cell whose bounds intersect the given rect. For algorithms that use
+// recursive subdivision to find the cells that intersect a particular object, this
+// method can be used to skip all of the initial subdivision steps where only
+// one child needs to be expanded.
+//
+// Note that this method is not the same as returning the smallest cell that contains
+// the intersection of this cell with rect. Because of the padding, even if one child
+// completely contains rect it is still possible that a neighboring child may also
+// intersect the given rect.
+//
+// The provided Rect must intersect the bounds of this cell.
+func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID {
+	// Quick rejection test: if rect contains the center of this cell along
+	// either axis, then no further shrinking is possible.
+	if p.level == 0 {
+		// Fast path (most calls to this function start with a face cell).
+		if rect.X.Contains(0) || rect.Y.Contains(0) {
+			return p.id
+		}
+	}
+
+	ijSize := sizeIJ(p.level)
+	if rect.X.Contains(stToUV(siTiToST(uint32(2*p.iLo+ijSize)))) ||
+		rect.Y.Contains(stToUV(siTiToST(uint32(2*p.jLo+ijSize)))) {
+		return p.id
+	}
+
+	// Otherwise we expand rect by the given padding on all sides and find
+	// the range of coordinates that it spans along the i- and j-axes. We then
+	// compute the highest bit position at which the min and max coordinates
+	// differ. This corresponds to the first cell level at which at least two
+	// children intersect rect.
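+	//
+	// A worked instance of that bit trick (values hypothetical): if the
+	// padded rect spans i-coordinates 0b0100..0b0111, the two endpoints
+	// XOR to 0b0011, whose highest set bit is bit 1, so the endpoints
+	// first share an aligned block of size 4 (= 1 << 2); the level
+	// arithmetic below turns that bit position into the answer.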
+ + // Increase the padding to compensate for the error in uvToST. + // (The constant below is a provable upper bound on the additional error.) + padded := rect.ExpandedByMargin(p.padding + 1.5*dblEpsilon) + iMin, jMin := p.iLo, p.jLo // Min i- or j- coordinate spanned by padded + var iXor, jXor int // XOR of the min and max i- or j-coordinates + + if iMin < stToIJ(uvToST(padded.X.Lo)) { + iMin = stToIJ(uvToST(padded.X.Lo)) + } + if a, b := p.iLo+ijSize-1, stToIJ(uvToST(padded.X.Hi)); a <= b { + iXor = iMin ^ a + } else { + iXor = iMin ^ b + } + + if jMin < stToIJ(uvToST(padded.Y.Lo)) { + jMin = stToIJ(uvToST(padded.Y.Lo)) + } + if a, b := p.jLo+ijSize-1, stToIJ(uvToST(padded.Y.Hi)); a <= b { + jXor = jMin ^ a + } else { + jXor = jMin ^ b + } + + // Compute the highest bit position where the two i- or j-endpoints differ, + // and then choose the cell level that includes both of these endpoints. So + // if both pairs of endpoints are equal we choose maxLevel; if they differ + // only at bit 0, we choose (maxLevel - 1), and so on. + levelMSB := uint64(((iXor | jXor) << 1) + 1) + level := maxLevel - findMSBSetNonZero64(levelMSB) + if level <= p.level { + return p.id + } + + return cellIDFromFaceIJ(p.id.Face(), iMin, jMin).Parent(level) +} diff --git a/vendor/github.com/blevesearch/geo/s2/point.go b/vendor/github.com/blevesearch/geo/s2/point.go new file mode 100644 index 0000000..89e7ae0 --- /dev/null +++ b/vendor/github.com/blevesearch/geo/s2/point.go @@ -0,0 +1,258 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +// Point represents a point on the unit sphere as a normalized 3D vector. +// Fields should be treated as read-only. Use one of the factory methods for creation. +type Point struct { + r3.Vector +} + +// sortPoints sorts the slice of Points in place. +func sortPoints(e []Point) { + sort.Sort(points(e)) +} + +// points implements the Sort interface for slices of Point. +type points []Point + +func (p points) Len() int { return len(p) } +func (p points) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p points) Less(i, j int) bool { return p[i].Cmp(p[j].Vector) == -1 } + +// PointFromCoords creates a new normalized point from coordinates. +// +// This always returns a valid point. If the given coordinates can not be normalized +// the origin point will be returned. +// +// This behavior is different from the C++ construction of a S2Point from coordinates +// (i.e. S2Point(x, y, z)) in that in C++ they do not Normalize. +func PointFromCoords(x, y, z float64) Point { + if x == 0 && y == 0 && z == 0 { + return OriginPoint() + } + return Point{r3.Vector{x, y, z}.Normalize()} +} + +// OriginPoint returns a unique "origin" on the sphere for operations that need a fixed +// reference point. 
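+//
+// (A quick note on the constructor above, with illustrative values:
+// PointFromCoords normalizes its input, so PointFromCoords(0, 0, 2) and
+// PointFromCoords(0, 0, 1) yield the same Point, while the all-zero input
+// falls back to this OriginPoint.)
+//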
+// In particular, this is the "point at infinity" used for
+// point-in-polygon testing (by counting the number of edge crossings).
+//
+// It should *not* be a point that is commonly used in edge tests in order
+// to avoid triggering code to handle degenerate cases (this rules out the
+// north and south poles). It should also not be on the boundary of any
+// low-level S2Cell for the same reason.
+func OriginPoint() Point {
+	return Point{r3.Vector{-0.0099994664350250197, 0.0025924542609324121, 0.99994664350250195}}
+}
+
+// PointCross returns a Point that is orthogonal to both p and op. This is similar to
+// p.Cross(op) (the true cross product) except that it does a better job of
+// ensuring orthogonality when the Point is nearly parallel to op; it returns
+// a non-zero result even when p == op or p == -op; and the result is a Point.
+//
+// It satisfies the following properties (f == PointCross):
+//
+// (1) f(p, op) != 0 for all p, op
+// (2) f(op,p) == -f(p,op) unless p == op or p == -op
+// (3) f(-p,op) == -f(p,op) unless p == op or p == -op
+// (4) f(p,-op) == -f(p,op) unless p == op or p == -op
+func (p Point) PointCross(op Point) Point {
+	// NOTE(dnadasi): In the C++ API the equivalent method here was known as "RobustCrossProd",
+	// but PointCross more accurately describes how this method is used.
+	x := p.Add(op.Vector).Cross(op.Sub(p.Vector))
+
+	// Compare exactly to the 0 vector.
+	if x == (r3.Vector{}) {
+		// The only result that makes sense mathematically is to return zero, but
+		// we find it more convenient to return an arbitrary orthogonal vector.
+		return Point{p.Ortho()}
+	}
+
+	return Point{x}
+}
+
+// OrderedCCW returns true if the edges OA, OB, and OC are encountered in that
+// order while sweeping CCW around the point O.
+//
+// You can think of this as testing whether A <= B <= C with respect to the
+// CCW ordering around O that starts at A, or equivalently, whether B is
+// contained in the range of angles (inclusive) that starts at A and extends
+// CCW to C. Properties:
+//
+// (1) If OrderedCCW(a,b,c,o) && OrderedCCW(b,a,c,o), then a == b
+// (2) If OrderedCCW(a,b,c,o) && OrderedCCW(a,c,b,o), then b == c
+// (3) If OrderedCCW(a,b,c,o) && OrderedCCW(c,b,a,o), then a == b == c
+// (4) If a == b or b == c, then OrderedCCW(a,b,c,o) is true
+// (5) Otherwise if a == c, then OrderedCCW(a,b,c,o) is false
+func OrderedCCW(a, b, c, o Point) bool {
+	sum := 0
+	if RobustSign(b, o, a) != Clockwise {
+		sum++
+	}
+	if RobustSign(c, o, b) != Clockwise {
+		sum++
+	}
+	if RobustSign(a, o, c) == CounterClockwise {
+		sum++
+	}
+	return sum >= 2
+}
+
+// Distance returns the angle between two points.
+func (p Point) Distance(b Point) s1.Angle {
+	return p.Vector.Angle(b.Vector)
+}
+
+// ApproxEqual reports whether the two points are similar enough to be equal.
+func (p Point) ApproxEqual(other Point) bool {
+	return p.approxEqual(other, s1.Angle(epsilon))
+}
+
+// approxEqual reports whether the two points are within the given epsilon.
+func (p Point) approxEqual(other Point, eps s1.Angle) bool {
+	return p.Vector.Angle(other.Vector) <= eps
+}
+
+// ChordAngleBetweenPoints constructs a ChordAngle corresponding to the distance
+// between the two given points. The points must be unit length.
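+//
+// Two anchor values, for illustration: orthogonal unit vectors such as
+// (1,0,0) and (0,1,0) give x.Sub(y.Vector).Norm2() == 2, the chord-angle
+// form of 90 degrees, while antipodal points give the maximum of 4
+// (180 degrees), which the min() below also uses to clamp rounding excess.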
+func ChordAngleBetweenPoints(x, y Point) s1.ChordAngle { + return s1.ChordAngle(math.Min(4.0, x.Sub(y.Vector).Norm2())) +} + +// regularPoints generates a slice of points shaped as a regular polygon with +// the numVertices vertices, all located on a circle of the specified angular radius +// around the center. The radius is the actual distance from center to each vertex. +func regularPoints(center Point, radius s1.Angle, numVertices int) []Point { + return regularPointsForFrame(getFrame(center), radius, numVertices) +} + +// regularPointsForFrame generates a slice of points shaped as a regular polygon +// with numVertices vertices, all on a circle of the specified angular radius around +// the center. The radius is the actual distance from the center to each vertex. +func regularPointsForFrame(frame matrix3x3, radius s1.Angle, numVertices int) []Point { + // We construct the loop in the given frame coordinates, with the center at + // (0, 0, 1). For a loop of radius r, the loop vertices have the form + // (x, y, z) where x^2 + y^2 = sin(r) and z = cos(r). The distance on the + // sphere (arc length) from each vertex to the center is acos(cos(r)) = r. + z := math.Cos(radius.Radians()) + r := math.Sin(radius.Radians()) + radianStep := 2 * math.Pi / float64(numVertices) + var vertices []Point + + for i := 0; i < numVertices; i++ { + angle := float64(i) * radianStep + p := Point{r3.Vector{r * math.Cos(angle), r * math.Sin(angle), z}} + vertices = append(vertices, Point{fromFrame(frame, p).Normalize()}) + } + + return vertices +} + +// CapBound returns a bounding cap for this point. +func (p Point) CapBound() Cap { + return CapFromPoint(p) +} + +// RectBound returns a bounding latitude-longitude rectangle from this point. +func (p Point) RectBound() Rect { + return RectFromLatLng(LatLngFromPoint(p)) +} + +// ContainsCell returns false as Points do not contain any other S2 types. +func (p Point) ContainsCell(c Cell) bool { return false } + +// IntersectsCell reports whether this Point intersects the given cell. +func (p Point) IntersectsCell(c Cell) bool { + return c.ContainsPoint(p) +} + +// ContainsPoint reports if this Point contains the other Point. +// (This method is named to satisfy the Region interface.) +func (p Point) ContainsPoint(other Point) bool { + return p.Contains(other) +} + +// CellUnionBound computes a covering of the Point. +func (p Point) CellUnionBound() []CellID { + return p.CapBound().CellUnionBound() +} + +// Contains reports if this Point contains the other Point. +// (This method matches all other s2 types where the reflexive Contains +// method does not contain the type's name.) +func (p Point) Contains(other Point) bool { return p == other } + +// Encode encodes the Point. +func (p Point) Encode(w io.Writer) error { + e := &encoder{w: w} + p.encode(e) + return e.err +} + +func (p Point) encode(e *encoder) { + e.writeInt8(encodingVersion) + e.writeFloat64(p.X) + e.writeFloat64(p.Y) + e.writeFloat64(p.Z) +} + +// Decode decodes the Point. +func (p *Point) Decode(r io.Reader) error { + d := &decoder{r: asByteReader(r)} + p.decode(d) + return d.err +} + +func (p *Point) decode(d *decoder) { + version := d.readInt8() + if d.err != nil { + return + } + if version != encodingVersion { + d.err = fmt.Errorf("only version %d is supported", encodingVersion) + return + } + p.X = d.readFloat64() + p.Y = d.readFloat64() + p.Z = d.readFloat64() +} + +// Rotate the given point about the given axis by the given angle. 
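+//
+// Round-trip sketch for the codec defined above (illustrative only): the
+// wire format is one version byte followed by the three float64 coordinates,
+// 25 bytes per point, and decoding reverses it. Nothing beyond the standard
+// library's bytes package is assumed:
+//
+//	var buf bytes.Buffer
+//	p := PointFromCoords(1, 0, 0)
+//	_ = p.Encode(&buf) // writes 25 bytes
+//	var q Point
+//	_ = q.Decode(&buf) // q now equals p; failures surface as returned errors
+//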
+// For Rotate, p and axis must be unit length; angle has no restrictions
+// (e.g., it can be positive, negative, or greater than 360 degrees).
+func Rotate(p, axis Point, angle s1.Angle) Point {
+	// Let M be the plane through P that is perpendicular to axis, and let
+	// center be the point where M intersects axis. We construct a
+	// right-handed orthogonal frame (dx, dy, center) such that dx is the
+	// vector from center to P, and dy has the same length as dx. The
+	// result can then be expressed as (cos(angle)*dx + sin(angle)*dy + center).
+	center := axis.Mul(p.Dot(axis.Vector))
+	dx := p.Sub(center)
+	dy := axis.Cross(p.Vector)
+	// Mathematically the result is unit length, but normalization is necessary
+	// to ensure that numerical errors don't accumulate.
+	return Point{dx.Mul(math.Cos(angle.Radians())).Add(dy.Mul(math.Sin(angle.Radians()))).Add(center).Normalize()}
+}
diff --git a/vendor/github.com/blevesearch/geo/s2/point_measures.go b/vendor/github.com/blevesearch/geo/s2/point_measures.go
new file mode 100644
index 0000000..6fa9b7a
--- /dev/null
+++ b/vendor/github.com/blevesearch/geo/s2/point_measures.go
@@ -0,0 +1,149 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+
+	"github.com/golang/geo/s1"
+)
+
+// PointArea returns the area of triangle ABC. This method combines two different
+// algorithms to get accurate results for both large and small triangles.
+// The maximum error is about 5e-15 (about 0.25 square meters on the Earth's
+// surface), the same as GirardArea below, but unlike that method it is
+// also accurate for small triangles. Example: when the true area is 100
+// square meters, PointArea yields an error about 1 trillion times smaller than
+// GirardArea.
+//
+// All points should be unit length, and no two points should be antipodal.
+// The area is always positive.
+func PointArea(a, b, c Point) float64 {
+	// This method is based on l'Huilier's theorem,
+	//
+	//   tan(E/4) = sqrt(tan(s/2) tan((s-a)/2) tan((s-b)/2) tan((s-c)/2))
+	//
+	// where E is the spherical excess of the triangle (i.e. its area),
+	//       a, b, c are the side lengths, and
+	//       s is the semiperimeter (a + b + c) / 2.
+	//
+	// The only significant source of error using l'Huilier's method is the
+	// cancellation error of the terms (s-a), (s-b), (s-c). This leads to a
+	// *relative* error of about 1e-16 * s / min(s-a, s-b, s-c). This compares
+	// to a relative error of about 1e-15 / E using Girard's formula, where E is
+	// the true area of the triangle. Girard's formula can be even worse than
+	// this for very small triangles, e.g. a triangle with a true area of 1e-30
+	// might evaluate to 1e-5.
+	//
+	// So, we prefer l'Huilier's formula unless dmin < s * (0.1 * E), where
+	// dmin = min(s-a, s-b, s-c). This basically includes all triangles
+	// except for extremely long and skinny ones.
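+	//
+	// A worked check (illustrative): for the octant triangle with vertices
+	// (1,0,0), (0,1,0), (0,0,1), each side is Pi/2, so s = 3*Pi/4 and
+	// tan(E/4) = sqrt(tan(3*Pi/8) * tan(Pi/8)^3) = tan(Pi/8), giving
+	// E = Pi/2: exactly one eighth of the sphere's total area of 4*Pi.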
+ // + // Since we don't know E, we would like a conservative upper bound on + // the triangle area in terms of s and dmin. It's possible to show that + // E <= k1 * s * sqrt(s * dmin), where k1 = 2*sqrt(3)/Pi (about 1). + // Using this, it's easy to show that we should always use l'Huilier's + // method if dmin >= k2 * s^5, where k2 is about 1e-2. Furthermore, + // if dmin < k2 * s^5, the triangle area is at most k3 * s^4, where + // k3 is about 0.1. Since the best case error using Girard's formula + // is about 1e-15, this means that we shouldn't even consider it unless + // s >= 3e-4 or so. +
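+	// A call-level sketch tying this together (illustrative only;
+	// PointFromCoords normalizes its arguments):
+	//
+	//	a := PointFromCoords(1, 0, 0)
+	//	b := PointFromCoords(0, 1, 0)
+	//	c := PointFromCoords(0, 0, 1)
+	//	area := PointArea(a, b, c) // ~Pi/2, matching the octant check above
+	//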