Compare commits


1142 Commits

SHA1 Message Date
6cc80d2565 Merge pull request #641 from meilisearch/bump-version
Bump version to v0.10.1
2020-04-28 16:12:01 +02:00
5265fafd7a Update the changelog for the release 2020-04-28 15:55:29 +02:00
287226b609 Bump crates versions to v0.10.1 2020-04-28 15:55:29 +02:00
7119b21b46 Merge pull request #640 from MarinPostma/fix_filter_parenthesis
fixes parentheses
2020-04-28 11:10:45 +02:00
d1f1bfe071 fix floats bug
Update CHANGELOG.md

Co-Authored-By: Clément Renault <renault.cle@gmail.com>
2020-04-28 10:44:07 +02:00
812465e014 fixes parentheses
adds tests
2020-04-27 22:29:29 +02:00
86bab04997 Merge pull request #635 from lironhl/bug_fix/highlight_longest_area
Bug fix/highlight longest area
2020-04-27 19:34:34 +02:00
867bd1ffd7 Tests for the new highlight algorithm 2020-04-27 20:10:40 +03:00
16e075983d Highlights result with longest match 2020-04-27 20:09:12 +03:00
1b7a6687c8 Update README.md (#630)
* Update README.md

* Update README.md

Co-Authored-By: Clément Renault <renault.cle@gmail.com>

Co-authored-by: Clément Renault <renault.cle@gmail.com>
2020-04-24 10:11:27 +02:00
8c41fb2b49 Merge pull request #623 from lironhl/bug_fix/chrome-content-overflow
Fixes the content overflow in the web interface in Chrome.
2020-04-22 13:47:33 +02:00
c1797c4e75 add overflow-wrap css property to content class 2020-04-22 11:33:18 +03:00
1c094346e2 Merge pull request #616 from MarinPostma/array-filter
filters on arrays
2020-04-21 10:58:21 +02:00
cd3c0d750c Add support for filtering on arrays of strings
update changelog

Update CHANGELOG.md

Co-Authored-By: Clément Renault <renault.cle@gmail.com>

fix requested changes
2020-04-21 10:33:57 +02:00
3d2f04a7af Added GitHub discussions 2020-04-20 10:54:08 +02:00
10d047a636 Merge pull request #607 from tpayet/add-separators-tokenizer
Add '@' char as a tokenizer separator
2020-04-16 12:18:11 +02:00
10211737c5 Add '@' char as a tokenizer separator
Update CHANGELOG.md

Co-Authored-By: Clément Renault <renault.cle@gmail.com>
2020-04-16 11:04:03 +02:00
45e55bc054 Merge pull request #608 from matboivin/minor-changes
Minor changes
2020-04-15 20:32:25 +02:00
1892ba8973 Minor changes 2020-04-15 16:04:50 +02:00
b7c287ffb7 Merge pull request #604 from meilisearch/personal-token-binaries
Use a personal access token to publish release binaries
2020-04-10 22:51:30 +02:00
457b645f3c Use a personal access token to publish bins
The default GITHUB_TOKEN expires after 1h
2020-04-10 18:28:28 +02:00
0185ffad89 Merge pull request #603 from meilisearch/bump-version
Bump version to v0.10
2020-04-10 15:56:56 +02:00
08edc9d5d0 Update the changelog to refer to the v0.10 2020-04-10 15:43:20 +02:00
979bea0327 Bump MeiliSearch version to v0.10 2020-04-10 15:43:03 +02:00
c7ea9f4cf3 Merge pull request #580 from meilisearch/rework-highlight-crop
Rework query highlight/crop parameters
2020-04-10 13:27:35 +02:00
233651bef8 update changelog 2020-04-10 12:26:53 +02:00
c6fb591348 add * on attributesToRetrieve 2020-04-10 12:26:34 +02:00
644e78df89 Add some tests 2020-04-10 12:26:34 +02:00
500eeca3fb Rework query highlight/crop parameters 2020-04-10 11:12:58 +02:00
c418abe92d Merge pull request #602 from meilisearch/fix-tide-cors
fix tide cors
2020-04-10 10:29:55 +02:00
2fdf33a006 update changelog 2020-04-10 10:13:43 +02:00
c3cf0cade9 fix tide cors 2020-04-10 10:13:43 +02:00
210bc68ced Merge pull request #592 from MarinPostma/query-filters
Implements query filters
2020-04-09 18:43:11 +02:00
193bded4b7 fixes broken tests 2020-04-09 18:26:48 +02:00
8f4d090f34 update changelog 2020-04-09 17:20:37 +02:00
a0a481697b replace lazy_static with once_cell 2020-04-09 17:13:34 +02:00
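A minimal sketch of the lazy_static → once_cell swap mentioned above, assuming the `once_cell` crate; the `DEFAULTS` map is illustrative, not from the MeiliSearch codebase:

```rust
use once_cell::sync::Lazy;
use std::collections::HashMap;

// With lazy_static! this would be `lazy_static! { static ref DEFAULTS: ... }`;
// once_cell expresses the same lazily-initialized static in plain Rust.
static DEFAULTS: Lazy<HashMap<&'static str, u32>> = Lazy::new(|| {
    let mut m = HashMap::new();
    m.insert("limit", 20);
    m
});

fn main() {
    // The closure runs on first access; later reads reuse the cached value.
    println!("{:?}", DEFAULTS.get("limit"));
}
```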
c3d5778aae allows to get names from schema 2020-04-09 17:13:34 +02:00
3e031d8297 adds error handling and integration 2020-04-09 17:13:34 +02:00
83f50914ec tests 2020-04-09 17:13:34 +02:00
d3916f28aa implements filter logic 2020-04-09 17:13:34 +02:00
dcf1096ac3 implements parser 2020-04-09 17:13:31 +02:00
66568a913c logic skeleton for filter and parser 2020-04-09 16:08:05 +02:00
6db6b40659 Merge pull request #594 from meilisearch/fix-stop-words
Fixes the stop words and words fst generation
2020-04-07 11:06:39 +02:00
780ac5cfd3 Update the CHANGELOG.md 2020-04-06 19:47:57 +02:00
d24209f5a7 Adds a test to check that stop words are correctly handled 2020-04-06 19:47:57 +02:00
29d021ad4d Fixes the stop words and words fst generation 2020-04-06 18:53:02 +02:00
eb28276923 Merge pull request #589 from meilisearch/change-logo
change logo format
2020-04-05 12:18:36 +02:00
0679ec4f41 change logo format 2020-04-05 11:09:38 +02:00
1b5b71869f Merge pull request #588 from techieshark/patch-1
Fix typo in README
2020-04-05 10:35:30 +02:00
6681681a76 Merge branch 'master' into patch-1 2020-04-05 10:34:10 +02:00
83d8dc0d2b Merge pull request #587 from sgummaluri/fix_first_all_updates_call_after_indexing
Fix for 'Update Status after the first update comes up to be empty (#542)'
2020-04-05 10:32:27 +02:00
49499ca54d Fix typo in README
Non-plural would be more usual in English. I assume "performances" was a typo.
2020-04-05 17:34:12 +10:00
16a63c74ea Modifying the test name for better readability 2020-04-05 00:26:09 +05:30
b4df54197b Slight grammar modification to the changelog message 2020-04-05 00:17:47 +05:30
a28b428074 Update changelog to make the message more readable 2020-04-05 00:14:58 +05:30
e5a336a042 Fix for 'First update does not appear before being processed' #542 2020-04-04 23:18:43 +05:30
5e5702833c Merge pull request #583 from meilisearch/gha-ignore-changelog
Ignores the CHANGELOG when a specific label is set
2020-04-03 15:47:20 +02:00
03063cf349 Ignores the CHANGELOG when the label asks for it 2020-04-03 15:06:25 +02:00
241b842ef7 Merge pull request #581 from meilisearch/publish-armv8-binary
Publish an aarch64 binary on releases
2020-04-03 11:56:35 +02:00
184c290773 Update the CHANGELOG 2020-04-03 10:42:19 +02:00
5c638184e9 Publish an aarch64 (aka ARMv8) binary on releases 2020-04-03 10:39:28 +02:00
3a88910a24 Merge pull request #579 from meilisearch/update-deps
Update dependencies
2020-04-02 20:24:23 +02:00
eddd453564 Makes http-service a dev-dependency 2020-04-02 18:36:35 +02:00
38c43759bb Update most of the dependencies 2020-04-02 18:36:04 +02:00
26225a2fdf Merge pull request #576 from ppamorim/fix-bench
Fix benchmark
2020-04-02 12:23:31 +02:00
9950fffb6f Simplify imports of std::fs and std::io, remove unneeded space, remove UpdateState 2020-04-02 11:02:19 +01:00
f5d57c9dce Replace the toml reader with the JSON settings reader, directly parse the data to SettingsUpdate, Update CHANGELOG 2020-04-02 11:01:56 +01:00
bc9c80a5ee Merge pull request #577 from meilisearch/change-slogan
Change the slogan
2020-04-01 16:35:59 +02:00
702f7445ec Change the slogan 2020-04-01 16:34:24 +02:00
dcb93e3166 Merge pull request #575 from ppamorim/nested-seq
Support nested-seq
2020-04-01 14:16:47 +02:00
02b79e0040 Modified JSON to add move conditions 2020-04-01 12:59:40 +01:00
88b71fb6c4 Update CHANGELOG to add seq support 2020-04-01 12:59:40 +01:00
95bb443430 Add empty seq 2020-04-01 12:59:40 +01:00
1b47a10e89 Add support for seq values 2020-04-01 12:59:40 +01:00
006e54109b Merge pull request #570 from tpayet/clean-readme-heroku
Removing Heroku deployment from README
2020-04-01 11:35:29 +02:00
7eb6333933 Removing Heroku deployment from README 2020-04-01 11:04:16 +02:00
065da3d613 Merge pull request #572 from ppamorim/ignore-null-nested-obj
Add support of nested null
2020-03-31 16:33:16 +02:00
e698fa0b63 Add issue index in the CHANGELOG 2020-03-31 15:06:04 +01:00
8b662be42b Update CHANGELOG.md
Co-Authored-By: Clément Renault <renault.cle@gmail.com>
2020-03-31 15:03:35 +01:00
52a4f7cd23 Update readme 2020-03-31 14:41:22 +01:00
690b8e0dd0 Replace .toString with String::new() 2020-03-31 14:01:44 +01:00
bc6d86c8ce serialize_unit returns an empty string 2020-03-31 13:51:12 +01:00
fbf7117d6a Rename function, add trailing line, replace JSON string with macro 2020-03-31 13:13:09 +01:00
51472142c6 Add test to check if nested null will be ignored 2020-03-31 12:00:13 +01:00
91d1bd5903 Merge pull request #569 from meilisearch/ignore-bool-nested-obj
Make the engine index booleans
2020-03-31 11:01:26 +02:00
69aee870da Make the engine index booleans
The engine will see the values as the text "true" and "false"
2020-03-31 10:39:58 +02:00
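A minimal sketch of the behavior described above, assuming `serde_json` values; `indexable_text` is a hypothetical helper, not the engine's actual indexing code:

```rust
use serde_json::Value;

// Hypothetical helper: booleans are indexed as the words "true"/"false",
// exactly as if the document had contained those strings.
fn indexable_text(value: &Value) -> Option<String> {
    match value {
        Value::Bool(b) => Some(b.to_string()), // true -> "true", false -> "false"
        Value::String(s) => Some(s.clone()),
        _ => None,
    }
}

fn main() {
    let doc: Value = serde_json::json!({ "published": true });
    assert_eq!(indexable_text(&doc["published"]).as_deref(), Some("true"));
}
```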
3b25bd71ab Merge pull request #567 from meilisearch/fix-not-dedup-matches
Construct a Set using the from_dirty method
2020-03-31 10:15:03 +02:00
c18e907f96 Construct a Set using the from_dirty method
This commit fixes #566 by ensuring that the slice of matches is
ordered and deduplicated.
2020-03-30 20:56:30 +02:00
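A minimal sketch of what `from_dirty` guarantees, assuming the `sdset` crate the commit refers to; the data is illustrative:

```rust
use sdset::SetBuf;

fn main() {
    // Matches collected out of order, possibly with duplicates.
    let dirty = vec![3, 1, 2, 3, 1];

    // `SetBuf::from_dirty` sorts and deduplicates, guaranteeing the set
    // invariant instead of trusting the input to already satisfy it.
    let set: SetBuf<i32> = SetBuf::from_dirty(dirty);
    let slice: &[i32] = &set; // deref: SetBuf -> Set -> [i32]
    assert_eq!(slice, &[1, 2, 3][..]);
}
```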
e3808b8694 Merge pull request #558 from matboivin/update-readme
Update readme
2020-03-28 10:46:00 +01:00
116b301359 Add Slack 2020-03-28 10:28:48 +01:00
3ed510b78e Minor fix 2020-03-28 10:28:30 +01:00
565c46fdd4 Merge pull request #548 from tendant/master
Stringify nested JSON object
2020-03-27 19:57:34 +01:00
b0255076de Merge branch 'master' into master 2020-03-27 19:43:02 +01:00
67348f2251 Merge pull request #555 from meilisearch/add-changelog
Add a CHANGELOG.md file
2020-03-27 19:33:39 +01:00
227bc716d8 Add a Github Action to ensure the CHANGELOG is updated in PRs 2020-03-27 19:12:50 +01:00
c3467313e5 Add a CHANGELOG to help the documentation follow the engine updates 2020-03-27 19:01:46 +01:00
c82eed010a Merge pull request #543 from MarinPostma/aligned-search-crops
adds support for aligned crop in search result
2020-03-27 18:58:45 +01:00
158c2b5382 tests aligned crop 2020-03-27 18:38:41 +01:00
2d1d59acb7 adds support for aligned cropping with CJK 2020-03-27 18:38:41 +01:00
0088de9802 adds support for aligned crop in search result 2020-03-27 18:38:41 +01:00
f49d2bca64 Merge branch 'master' into master 2020-03-27 17:07:06 +01:00
b7273c450f Merge pull request #545 from matboivin/update-readme
Update readme
2020-03-27 11:49:11 +01:00
4130fddcc8 Center-align crates demo gif 2020-03-27 11:28:57 +01:00
4f05045acb Center-align web interface gif 2020-03-27 11:20:30 +01:00
bc16c9beb7 Update gif links 2020-03-27 11:17:31 +01:00
0af9f6cf6e Add movies gif and move crates demo gif 2020-03-27 11:17:17 +01:00
022aeac808 Stringify nested JSON object 2020-03-26 18:45:57 -07:00
20461ccf36 Add gif
Co-Authored-By: cvermand <33010418+bidoubiwa@users.noreply.github.com>
2020-03-26 21:56:27 +01:00
7297396162 Update performance 2020-03-26 19:22:59 +01:00
c15deb41b0 Remove How it works (deep dive) section 2020-03-26 16:26:43 +01:00
cb2a08db7e Center-align badges 2020-03-26 16:24:03 +01:00
67703b5ea2 Remove Notes about system allocator 2020-03-26 16:17:47 +01:00
c445abb982 Replace a by an
Co-Authored-By: Clément Renault <renault.cle@gmail.com>
2020-03-26 16:14:52 +01:00
38d97fa339 Change phrasing 2020-03-26 13:48:08 +01:00
d45f0819be Remove repetitive word 2020-03-26 13:25:57 +01:00
9375d0efbe Fix details 2020-03-26 13:23:20 +01:00
2291c33074 Align with quick start guide 2020-03-26 13:18:11 +01:00
0a216066f4 Split commands 2020-03-26 13:13:02 +01:00
eea2a9cfc3 Add contact 2020-03-26 13:10:44 +01:00
33c2b9c5ff Add social 2020-03-26 13:04:23 +01:00
1129812e6e Update link formatting 2020-03-26 12:42:41 +01:00
b1b0c6b4b3 Add useful links 2020-03-26 12:31:58 +01:00
6ae3f2f8b9 Remove line under logo 2020-03-26 12:24:02 +01:00
f8d594e7ea Update formatting and add logo 2020-03-26 12:23:09 +01:00
38c3aa542f Add logo image 2020-03-26 12:05:53 +01:00
f3382125e1 Merge branch 'master' of git://github.com/meilisearch/MeiliSearch into update-readme 2020-03-26 12:01:40 +01:00
592a438ae8 Rephrase the readme 2020-03-26 11:59:40 +01:00
d84a86897c Merge pull request #540 from meilisearch/publish-arm-binaries
Publish an ARMv7 binary for the releases
2020-03-26 11:14:48 +01:00
88c063e887 Publish an ARMv7 binary for the releases 2020-03-26 10:51:47 +01:00
ba8a410d4c Merge pull request #539 from emresaglam/html-sanitize
html sanitize
2020-03-25 21:33:03 +01:00
451061f4b8 Merge branch 'master' into html-sanitize 2020-03-25 13:06:18 -07:00
ae17aa4955 Update meilisearch-http/public/interface.html
bypassing <em> tag after encoding the "<>"

Co-Authored-By: Clément Renault <renault.cle@gmail.com>
2020-03-25 12:48:59 -07:00
f589d07706 Merge pull request #544 from meilisearch/add-slack-link
Add a slack badge on readme
2020-03-25 20:29:00 +01:00
3f343ebfdb Update README.md 2020-03-25 20:22:04 +01:00
95ea3e39d2 Merge pull request #541 from MarinPostma/search-result-count
Adds number of hits in search result
2020-03-25 15:34:06 +01:00
a6dcd7a421 fixes tests
fixes tests impacted by signature change of query
2020-03-25 15:17:20 +01:00
fa9b7dd29f removes useless deserializer for SearchResult 2020-03-25 13:59:15 +01:00
fd65cf9dcb populates exhaustive number of hits 2020-03-25 12:44:38 +01:00
6e9d7f94d4 adds exhaustive number of hits to search result 2020-03-25 12:11:37 +01:00
6151bc262f Added the missing function call 2020-03-24 11:03:16 -07:00
b62f9fabf2 Update meilisearch-http/public/interface.html
Co-Authored-By: Clément Renault <renault.cle@gmail.com>
2020-03-24 10:39:53 -07:00
86e1ba871f html sanitize
Added a function to sanitize the html
This is for browser side only.
2020-03-24 08:37:56 -07:00
a6ac902bf4 Merge pull request #534 from curquiza/homebrew-automatization
Automate homebrew publish
2020-03-20 16:14:41 +01:00
4cdb67c249 Automate homebrew publish 2020-03-20 12:14:08 +01:00
29622e11f5 Merge pull request #533 from meilisearch/bump-to-v0.9.0
Bump the workspace crates to 0.9.0
2020-03-19 13:50:55 +01:00
3ca8db2cc1 Bump the workspace crates to 0.9.0 2020-03-19 11:56:23 +01:00
cc5eb885ea Merge pull request #531 from meilisearch/bump-rc
Bump the workspace crates to 0.9.0-rc.1
2020-03-16 18:09:11 +01:00
f6972ec682 Bump the workspace crates to 0.9.0-rc.1 2020-03-16 16:58:20 +01:00
cfe21f7b02 Merge pull request #530 from meilisearch/fix-ranking-rules-inference
Ranking fields should be stored and indexed by default
2020-03-16 16:53:06 +01:00
2d82f1b655 ranking fields should be stored and indexed by default; fix #521 2020-03-16 16:19:23 +01:00
cf6e481c14 Merge pull request #520 from meilisearch/fix-http-issues
Fix http issues
2020-03-11 15:21:50 +01:00
7be376721c global settings update now makes a partial update; fix #516 2020-03-11 14:42:58 +01:00
ce0e8415d5 adding primary-key when adding documents does not work; fix #519 2020-03-11 14:12:38 +01:00
4ccf1d10bd error message when impossible to infer the primary-key; fix #517 2020-03-11 12:27:42 +01:00
c25641ff2d fix that AcceptNewFields does not take into account the primary-key; fix #518 2020-03-11 12:00:40 +01:00
14c1aba6c7 Merge pull request #509 from meilisearch/fix-internal-schema
Fix internal schema
2020-03-10 16:25:36 +01:00
8204d961de allow api key in header when no master-key is set; fix #515 2020-03-10 15:59:16 +01:00
ef3bcd65ab fix comments from review 2020-03-10 15:59:11 +01:00
b06e33f3d3 fix errors on http parameter naming 2020-03-10 12:08:10 +01:00
179969a9e2 fix tests + fmt 2020-03-10 11:29:56 +01:00
c984d8d5a5 rename identifier into primaryKey; fix #514 2020-03-09 18:45:29 +01:00
8ffa80883a remove the unused function 2020-03-09 18:45:29 +01:00
86c3482cbd review the internal schema to allow to create schema without identifier; fix #513 2020-03-09 18:45:20 +01:00
16a99aa95e update to infer identifier; fix #498 2020-03-06 10:55:25 +01:00
6d86968c4c Merge pull request #496 from meilisearch/small-fixes-before-0.9
Fix some issues before v0.9
2020-03-06 10:28:45 +01:00
8df6d6e954 fix error 500 when sending bad rankingRules; fix #500 2020-03-06 10:15:19 +01:00
8aeddec982 remove the route to get identifier on settings; fix #502 2020-03-06 10:15:19 +01:00
f4ae0844ab replace index-new-field route with accept-new-fields; fix #503 2020-03-06 10:15:19 +01:00
d56968cb23 default values of synonyms and stop-words; fix #499 fix #504 2020-03-06 10:15:19 +01:00
c5b6e641a4 index UID format; fix #497 2020-03-06 10:15:19 +01:00
041eed2a06 no id returned; fix #492 2020-03-06 10:15:19 +01:00
54c675e195 fix delete-batch route; #493 2020-03-06 10:15:19 +01:00
81ce90e57f update test 2020-03-06 10:15:19 +01:00
6016f2e941 change wording of custom ranking rules dsc -> desc; #490 2020-03-06 10:15:19 +01:00
4d27318b72 remove unnecessary comment on env Opt; #491 2020-03-06 10:15:11 +01:00
decce4d8e4 change route /keys/ -> /keys; #495 2020-03-05 15:33:02 +01:00
1cb9f75026 Merge pull request #507 from meilisearch/fix-documents-fields-order-inference
Fix the inference of the documents searchable fields
2020-03-04 14:16:36 +01:00
5e31d28759 Fix the inference of the documents searchable fields 2020-03-03 20:54:17 +01:00
2b780ab2c5 Merge pull request #489 from meilisearch/fix-rank-distinct
Use distinct on search
2020-03-02 16:34:27 +01:00
a2f0f95337 use distinct on search 2020-03-02 16:19:41 +01:00
72450c765d Merge pull request #484 from meilisearch/fix-reindex-by-chunk
Stop reindexing by chunk during complete reindexing
2020-02-28 18:29:25 +01:00
250aeaa86c stop reindexing by chunk during complete reindexing 2020-02-28 11:49:12 +01:00
06ace88901 Merge pull request #482 from meilisearch/review-settings-endpoint
Review settings endpoint
2020-02-28 11:39:38 +01:00
47009615ee rename words_position to wordsPosition; fix #483 2020-02-27 16:24:49 +01:00
dda08d60d2 cargo fmt 2020-02-27 14:33:57 +01:00
f182afc50b update tests 2020-02-27 11:30:23 +01:00
bb5d931f16 rename criterions on settings route; fix #480 2020-02-27 11:30:22 +01:00
3c74e71d4f show default ranking rules if user reset them; fix #476 2020-02-27 11:30:17 +01:00
79e07fa852 reset value of searchable and displayed attributes; fix #473 2020-02-27 11:04:39 +01:00
aa95c26e07 update tests 2020-02-27 11:04:39 +01:00
2eb6f81c58 rename ranking_distinct to distinct_attribute; fix #474 2020-02-27 11:04:39 +01:00
a067a1b16b replace index_new_fields with accept_new_fields; fix #475 2020-02-27 11:04:38 +01:00
1df51c52e0 Merge pull request #458 from meilisearch/rename-exactness-criterion
Rename the Exact criterion into Exactness
2020-02-25 16:23:57 +01:00
96248d9bfa Change the exactness criterion in the tests 2020-02-25 14:24:15 +01:00
9d167c08f4 Rename the Exact criterion into Exactness 2020-02-25 14:16:55 +01:00
8e6560d102 Merge pull request #464 from meilisearch/simplify-keys
Simplify keys & add launcher resume
2020-02-17 13:59:41 +01:00
ad83c3ab5a add launch resume & environment 2020-02-17 10:13:08 +01:00
257b7b4df4 introduce new key management 2020-02-14 12:54:07 +01:00
5ac757a5fd Merge pull request #465 from meilisearch/fix-un-rankable-fields
fix un-rankable fields errors
2020-02-14 11:27:12 +01:00
2d7a1bfce0 fix un-rankable fields errors; fix #463 2020-02-14 10:34:33 +01:00
3845b89a16 Merge pull request #441 from meilisearch/issues-0.9.0
Stabilize http endpoint
2020-02-13 15:57:37 +01:00
ce8e12c7c5 update tests 2020-02-13 12:24:30 +01:00
4986adc186 move identifier from settings to index; fix #470 2020-02-12 17:00:14 +01:00
dc9ca2ebc9 fixes for review 2020-02-12 16:51:14 +01:00
40d7396d90 update tests for settings 2020-02-11 15:28:01 +01:00
559c2f8907 Add stop words on query 2020-02-11 15:28:00 +01:00
dc6907e748 rebase from master 2020-02-11 15:28:00 +01:00
2143226f04 set up clippy and make a pass on the code 2020-02-11 15:28:00 +01:00
ea2a64a504 remove unnecessary settings routes 2020-02-11 15:28:00 +01:00
a5b0e468ee fix for review 2020-02-11 15:28:00 +01:00
14b5fc4d6c cargo fmt 2020-02-11 15:28:00 +01:00
f498bfed51 add test on /settings/ranking 2020-02-11 15:27:59 +01:00
50a9825a0f fix some use cases on settings 2020-02-11 15:27:59 +01:00
5c49f08bb2 update settings routes 2020-02-11 15:27:59 +01:00
bbf9f41a04 add cors 2020-02-11 15:27:59 +01:00
6a32432b01 add /settings/index-new-fields routes 2020-02-11 15:27:59 +01:00
037724576e update tests 2020-02-11 15:27:59 +01:00
10b8a0ab00 add request middleware 2020-02-11 15:27:59 +01:00
faf0dd2f44 do not show matches on undesired fields 2020-02-11 15:27:58 +01:00
585bba43a0 set new attributes indexed if needed 2020-02-11 15:27:58 +01:00
b1528f9466 allow to see highlights with matches and crop; fix #450 #449 2020-02-11 15:27:58 +01:00
7a491a64c0 add test 2020-02-11 15:27:58 +01:00
57503ad9bf add test on search 2020-02-11 15:27:58 +01:00
c276dda305 run cargo fmt 2020-02-11 15:27:58 +01:00
9c0497c419 change the way settings are shown in updates 2020-02-11 15:27:58 +01:00
b33dac9faa add test for search + update ci for test in release 2020-02-11 15:27:57 +01:00
f77f38dfa0 fix update system 2020-02-11 15:27:57 +01:00
58fe87067b finish settings 2020-02-11 15:27:57 +01:00
dbba310770 squash me 2020-02-11 15:27:57 +01:00
6deb481589 definitely remove attributes_ranked on settings; auto create it with ranking_rules 2020-02-11 15:27:57 +01:00
036977bfe4 add the possibility to totally clear the schema 2020-02-11 15:27:57 +01:00
d280848ff6 add test for settings 2020-02-11 15:27:56 +01:00
7a6f583b1f fix issue on ranking rules 2020-02-11 15:27:56 +01:00
e078eafb1f clean unused functions 2020-02-11 15:27:56 +01:00
6f534540a6 fix error on stop words fst 2020-02-11 15:27:56 +01:00
38d57d213f expose api for new settings 2020-02-11 15:27:56 +01:00
7c14769226 add test for index creation 2020-02-11 15:27:56 +01:00
b71bbcffaa simplify error handling 2020-02-11 15:27:56 +01:00
f83e874e35 return the good created_at and updated_at on index creation 2020-02-11 15:27:55 +01:00
ae0a11e422 fix schema & fix tests 2020-02-11 15:27:55 +01:00
116a637cfd set test for healthiness 2020-02-11 15:27:55 +01:00
83cf683db4 introduce test for meilisearch-http 2020-02-11 15:27:55 +01:00
1b3312871e set name optional during index creation 2020-02-11 15:27:55 +01:00
0e12920910 bump tide version 2020-02-11 15:27:55 +01:00
a35eb16a2a store the schema after each document update 2020-02-11 15:27:54 +01:00
4f0ead625b adapt meilisearch-http to the new schemaless option 2020-02-11 15:27:54 +01:00
21d122a870 rewrite indexed_pos -> field_id for highlights 2020-02-11 15:27:54 +01:00
130fb74928 introduce a new schemaless way 2020-02-11 15:27:54 +01:00
bbe1845f66 squash-me 2020-02-11 15:27:54 +01:00
2ee90a891c introduce a new settings update system 2020-02-11 15:27:54 +01:00
203c83bdb4 Remove SearchableAttributes; fix #429 2020-02-11 15:27:53 +01:00
73918d803c Rename AttributesToSearchIn into SearchableAttributes; fix #428 2020-02-11 15:27:53 +01:00
110adcae85 Remove the schema; fix #422 2020-02-11 15:27:53 +01:00
c536ea64c3 Change the indexes stats HTTP route; fix #423 2020-02-11 15:27:53 +01:00
aa7a6d5f8c Rewrite the synonyms endpoint; fix #418 2020-02-11 15:27:53 +01:00
91c6539baf Rewrite the stop-words endpoint; fix #417 2020-02-11 15:27:53 +01:00
f0590d3301 Change documents routes; fix #416 2020-02-11 15:27:53 +01:00
a5c5df0290 Merge pull request #443 from curquiza/brew
Add Brew installation in README
2020-02-10 16:36:33 +01:00
f0c2913dcf Add Brew installation in README 2020-02-10 16:26:50 +01:00
9c6d590950 Merge pull request #442 from curquiza/docker-github-action
Change github action for docker latest image
2020-02-10 16:26:14 +01:00
ab3339f5a1 Change github action for docker latest image 2020-02-10 16:11:45 +01:00
43ce45f62b Merge pull request #456 from djKooks/update/cjk-filter-ko-ja
Update CJK filter
2020-01-30 09:46:08 +01:00
2b5d153361 Update CJK filter 2020-01-30 09:55:16 +09:00
cde8845143 Merge pull request #454 from meilisearch/fix-db-compaction
Support compaction with the new split database
2020-01-24 17:45:34 +01:00
7c0d8f073b Support compaction with multi database 2020-01-24 17:38:14 +01:00
69adb1d771 Merge pull request #453 from meilisearch/introduce-query-tree
Introduce a query tree structure
2020-01-23 10:40:53 +01:00
a2bc689b92 Fix the tests a little bit 2020-01-22 18:12:56 +01:00
a9adbda2cd Make the engine support non-exact multi-words synonyms 2020-01-22 18:11:58 +01:00
0b9fe2c072 Introduce the new Query Tree creation supporting more operations 2020-01-22 17:46:46 +01:00
789e05304c Replace prints by debug logs 2020-01-21 11:05:34 +01:00
7604387701 Clean up the dependencies 2020-01-21 11:04:25 +01:00
daffcaf4c6 Make the docids OR operation method conditional 2020-01-19 12:29:06 +01:00
ff1ec599e0 Try a better version of sdset 2020-01-19 12:01:24 +01:00
e44d498c94 Display more debug info for prefix tolerant fetches 2020-01-19 11:07:32 +01:00
c334d6b7fe Avoid sorting sorted sequences, prefer using set operations 2020-01-19 10:58:01 +01:00
5465e401bb Catch query tree related errors 2020-01-17 10:41:27 +01:00
9cc3c56c9c Fix the prefix system 2020-01-16 18:41:27 +01:00
d7a7560220 Use a union instead of a sort for prefix fetching 2020-01-16 17:09:27 +01:00
70a529d197 Reduce the number of args of update functions 2020-01-16 16:29:50 +01:00
be31a14326 Make the clear all operation clear caches 2020-01-16 16:19:04 +01:00
96139da0d2 Reintroduce the distinct search system 2020-01-16 15:55:55 +01:00
74fa9ee4df Introduce a better highlighting system 2020-01-16 14:56:16 +01:00
00336c5154 Reintroduce a basic highlight display 2020-01-16 14:24:45 +01:00
3912d1ec4b Improve query parsing and interpretation 2020-01-16 14:11:17 +01:00
70d4f47f37 Differentiate short words as prefix or exact matches 2020-01-16 12:01:51 +01:00
9809ded23d Implement synonym fetching 2020-01-16 11:38:23 +01:00
5f9a3546e0 Use a union instead of a sort for OR ops 2020-01-15 15:14:24 +01:00
db625a08f7 Update lock file 2020-01-15 12:25:14 +01:00
44fec1b6c9 Cache prefixes of a length of 2 2020-01-14 18:17:52 +01:00
54dacb362d Use different algorithms for different documents ratios 2020-01-14 17:51:08 +01:00
6edb460bea Try with an exponential search 2020-01-14 16:52:24 +01:00
40dab80dfa Change the way we filter the documents 2020-01-14 14:18:01 +01:00
681711fced Fix query ids to be usize 2020-01-14 13:12:42 +01:00
21c1473e0c Introduce the distance data 2020-01-14 11:38:04 +01:00
8acbdcbbad wip: Make the new query tree work with the criteria 2020-01-13 14:36:06 +01:00
da8abebfa2 Introduce the query words mapping along with the query tree 2020-01-13 13:29:47 +01:00
4f7a7ea0bb Faster intersection group by 2020-01-09 16:30:03 +01:00
d6c9ba8f08 Store the postings lists 2020-01-09 15:04:53 +01:00
ec8916bf54 Change the debug outputs 2020-01-09 12:05:39 +01:00
81c573ec92 Add the raw document IDs to the postings lists 2020-01-08 15:30:43 +01:00
9420edadf4 Introduce the Postings type to decorrelate the DocumentIds 2020-01-08 14:48:23 +01:00
d724a7659e Introduce a query tree context struct 2020-01-08 13:37:22 +01:00
887c212b49 Add more logs about the docids construction 2020-01-08 13:22:42 +01:00
07937ed6d7 Use the prefix caches 2020-01-08 13:14:07 +01:00
a262c67ec3 limit the search in the FST 2020-01-08 13:06:12 +01:00
13ca30c4d8 WIP: Made the query tree traversing support prefix search 2020-01-08 12:02:58 +01:00
fbcec2975d wip: Impl a basic tree traversing 2020-01-07 18:24:13 +01:00
6e1f4af833 wip: Create a tree from query but need to show synonyms 2020-01-07 18:24:13 +01:00
856c5c4214 Fix group offset computing 2019-12-31 14:24:10 +01:00
670e80c151 Use the cached postings lists in the query system 2019-12-31 13:32:36 +01:00
eed07c724f Add more logging for postings lists fetching by word 2019-12-31 13:32:36 +01:00
99d35fb940 Introduce a first version of a number of candidates reducer
It works by ignoring the postings lists associated with documents that the previous words did not return
2019-12-31 13:32:36 +01:00
106b886873 Cache the prefix postings lists 2019-12-30 18:01:32 +01:00
928876b553 Introduce the postings lists caching stores
Currently not used
2019-12-30 18:01:27 +01:00
58836d89aa Rename the PrefixCache into PrefixDocumentsCache 2019-12-30 15:42:09 +01:00
1a5a104f13 Display proximity evaluation number of calls 2019-12-30 15:42:09 +01:00
9790c393a0 Change the time measurement of the query 2019-12-30 15:42:08 +01:00
064cfa4755 Add more debug, where are those 100ms 2019-12-30 15:42:08 +01:00
ed6172aa94 Add a time measurement of the criterion loop 2019-12-30 15:42:08 +01:00
8c140f6bcd Increase the disk usage limit 2019-12-30 15:42:08 +01:00
1e1f0fcaf5 Introduce a basic cache system for first letters 2019-12-30 15:42:08 +01:00
d21352a109 Change the time measurement of the FST 2019-12-30 15:42:08 +01:00
4be11f961b Use an ugly trick to avoid cloning the FST 2019-12-30 15:42:07 +01:00
1163f390b3 Restrict FST search to the first letter of the word 2019-12-30 15:42:07 +01:00
534143e91d Merge pull request #439 from meilisearch/fix-update-deadlock
Fix a blocking channel, appearing like a deadlock
2019-12-30 15:41:26 +01:00
691e2a3c1d Fix a blocking channel, appearing like a deadlock 2019-12-30 15:28:28 +01:00
20b92fcb4c Merge pull request #435 from meilisearch/debug-missing-measurements
Add more debug timings
2019-12-20 18:04:21 +01:00
04bb49989f Add more debug timings 2019-12-20 14:18:48 +01:00
2aa7cb9d20 Merge pull request #433 from meilisearch/fix-index-creation
Set the indexes info in the create_index function
2019-12-19 10:59:47 +01:00
d12ff15ee3 Set the indexes info in the create_index function 2019-12-19 10:38:56 +01:00
11b684114d Merge pull request #431 from curquiza/web-interface-readme
Update README with the Web Interface introduction
2019-12-18 13:50:12 +01:00
1bf177f81a Update README with the Web Interface introduction
Co-Authored-By: cvermand <33010418+bidoubiwa@users.noreply.github.com>
2019-12-18 13:41:15 +01:00
df7dc54409 Merge pull request #415 from meilisearch/fix-blocking-settings
Use a main read transaction instead of a write one
2019-12-17 16:21:41 +01:00
7e86056a27 Use a main read transaction instead of a write one 2019-12-17 15:48:06 +01:00
59f74dabe7 Merge pull request #407 from meilisearch/friendly-web-interface
Friendly web interface
2019-12-17 14:47:24 +01:00
4610198ba2 Introduce a Bulma based web interface 2019-12-17 14:36:26 +01:00
3d19f566b6 Merge pull request #406 from bidoubiwa/remove_nsfw_movie
Removed nsfw movie from movies.json dataset
2019-12-13 17:56:09 +01:00
8d90cd8e35 Removed nsfw movie from movies.json dataset 2019-12-13 17:21:46 +01:00
610d44e703 Merge pull request #401 from tpayet/feat/heroku-button
Add heroku one-click deploy
2019-12-13 16:26:31 +01:00
0272b44d7e Add heroku one-click deploy 2019-12-13 16:03:00 +01:00
3eccf2fd76 Merge pull request #405 from meilisearch/disable-bench-workflow
Disable the benchmarks github workflow
2019-12-13 15:56:16 +01:00
736f285092 Disable the benchmarks github workflow 2019-12-13 15:37:24 +01:00
020cd7f9e8 Merge pull request #403 from meilisearch/lazy-data-fetching
Criteria lazy data preparation
2019-12-13 14:57:19 +01:00
40c0b14d1c Reintroduce searchable attributes and reordering 2019-12-13 14:38:25 +01:00
a4dd033ccf Rename raw_matches into bare_matches 2019-12-13 14:38:25 +01:00
48e8778881 Clean up the modules declarations 2019-12-13 14:38:25 +01:00
4be23efe66 Remove the AttrCount type
Could probably be reintroduced later
2019-12-13 14:38:25 +01:00
7d67750865 Reintroduce exactness for one-word document fields 2019-12-13 14:38:25 +01:00
746e6e170c Make the test pass again 2019-12-13 14:38:24 +01:00
d93e35cace Introduce ContextMut and Context structs 2019-12-13 14:38:24 +01:00
d75339a271 Prefer summing the attribute 2019-12-13 14:38:24 +01:00
86ee0cbd6e Introduce bucket_sort_with_distinct function 2019-12-13 14:38:24 +01:00
248ccfc0d8 Update the criteria to the new ones 2019-12-13 14:38:24 +01:00
ea148575cf Remove the raw_query functions 2019-12-13 14:38:23 +01:00
efc2be0b7b Bump the sdset dependency to 0.3.6 2019-12-13 14:38:23 +01:00
8d71112dcb Rewrite the phrase query postings lists
This simplified the multiword_rewrite_matches function a little bit.
2019-12-13 14:38:23 +01:00
dd03a6256a Debug pre filtered number of documents 2019-12-13 14:38:23 +01:00
9c03bb3428 First probably working phrase query doc filtering 2019-12-13 14:38:23 +01:00
22b19c0d93 Fix the processed distance algorithm 2019-12-13 14:38:22 +01:00
0f698d6bd9 Work in progress: Bad Typo detection
I have an issue where "speakers" is split into "speaker" and "s",
when I compute the distances for the Typo criterion,
it takes "s" into account and put a distance of zero in the bucket 0
(the "speakers" bucket), therefore it reports any document matching "s"
without typos as best results.

I need to make sure to ignore "s" when its associated part "speaker"
doesn't even exist in the document and is not in the place
it should be ("speaker" followed by "s").

It is hard to imagine that this will add as much computation time to
the Typo criterion as in the previous algorithm, where I computed
the real query/word indexes and removed the invalid ones
before sending the documents to the bucket sort.
2019-12-13 14:38:22 +01:00
4e91b31b1f Make the Typo and Words work with synonyms 2019-12-13 14:38:22 +01:00
f87c67fcad Improve the QueryEnhancer by doing a single lookup 2019-12-13 14:38:22 +01:00
902625601a Work in progress: It seems like we support synonyms, split and concat words 2019-12-13 14:38:22 +01:00
d17d4dc5ec Add more debug infos 2019-12-13 14:38:21 +01:00
ef6a4db182 Before improving fields AttrCount
Removing the fields_count fetching reduced the search time by 2x; we should look at lazily pulling them from the criteria that need them

ugly-test: Make the fields_count fetching lazy

Just before running the exactness criterion
2019-12-13 14:38:21 +01:00
11f3d7782d Introduce the AttrCount type 2019-12-13 14:38:21 +01:00
5b9fff6636 Merge pull request #352 from meilisearch/add-search-benchmarks
Add some criterion benchmarks to help detect regressions
2019-12-13 14:37:48 +01:00
a8272f0eef Add a benchmark github workflow 2019-12-13 14:17:40 +01:00
951f0bcb10 squash-me: Improve benchmarks naming 2019-12-13 14:17:40 +01:00
d8ba405baf Add some criterion benchmarks to help measure improvements 2019-12-13 14:17:40 +01:00
70f18a8086 Merge pull request #400 from meilisearch/fix-issues
Close multiples issues on HTTP behavior
2019-12-13 10:30:42 +01:00
0b5db77511 Fix erase setting option 2019-12-13 10:22:35 +01:00
3a4130f344 Allow to index files with null or boolean 2019-12-12 19:25:05 +01:00
1ea29bb92e Fix unwrap if schema does not contain ranked attributes on a custom ranking setting 2019-12-12 16:37:46 +01:00
04d34cb8aa Search; return formatted section only if it's necessary 2019-12-12 16:36:42 +01:00
bf80729e17 Update message on access forbidden 2019-12-12 15:39:32 +01:00
88b3c05155 Stop words; Do not reindex all documents if there are no documents 2019-12-12 15:31:39 +01:00
6edef07e29 HTTP delete index route; Fix error on index not found 2019-12-12 14:06:16 +01:00
5ad73fe08b Merge pull request #399 from meilisearch/rewrite-synonym-endpoint
Rewrite the synonym endpoint
2019-12-12 12:58:14 +01:00
a4f26e8e48 Rewrite the synonym endpoint 2019-12-12 12:47:02 +01:00
cc10804607 Merge pull request #395 from meilisearch/update-bitly-link
Update the bit.ly movies.json link
2019-12-10 18:13:52 +01:00
f959cd76ae Update the bit.ly movies.json link 2019-12-10 18:07:14 +01:00
dcd332e2e4 Merge pull request #396 from meilisearch/disable-windows-tests
Disable windows tests
2019-12-10 18:03:13 +01:00
f3a276d1e1 Update the workflow README.md 2019-12-10 17:56:24 +01:00
640d21a7d2 Disable the Windows tests workflow 2019-12-10 17:53:26 +01:00
216cccbfba Merge pull request #391 from meilisearch/fix-one-document-route
Do not expect a JSON value as a document identifier
2019-12-09 21:53:04 +01:00
04d1da11f7 Do not expect a JSON value as a document identifier 2019-12-09 21:34:40 +01:00
ee4e9dcc74 Merge pull request #388 from meilisearch/remove-synonyms-unwraps
Remove unsound unwraps from the synonym routes
2019-12-09 17:06:02 +01:00
6fef04be20 Remove unsound unwraps from the synonym routes 2019-12-09 16:54:54 +01:00
86347bff3a Merge pull request #384 from curquiza/install-script-prereleases
Change regexp in install script
2019-12-09 15:28:19 +01:00
e291d9954a Change regexp in install script to not take into account pre-releases 2019-12-09 15:14:25 +01:00
7a548467b9 Merge pull request #382 from curquiza/health-routes
Keep only useful routes for /health
2019-12-08 18:11:19 +01:00
06d8e00ff3 Keep only useful routes for /health 2019-12-08 17:56:33 +01:00
225f5a172d Merge pull request #381 from curquiza/update-index-httpstatus
Change HTTP status of update index route
2019-12-08 17:53:01 +01:00
e531ff2e98 Change HTTP status of update index route 2019-12-08 17:10:21 +01:00
8c8040884e Merge pull request #376 from meilisearch/windows-support
Update the actions to support Windows
2019-12-07 12:07:27 +01:00
e3611ad0e4 Update the action to test on more platforms 2019-12-07 11:57:33 +01:00
289bc6570b Update the action to publish windows binaries 2019-12-07 11:52:14 +01:00
dc1849d291 Bump heed to 0.6.1 2019-12-07 11:49:45 +01:00
17a66227f4 Merge pull request #375 from nithinkashyapn/master
Docker command updated
2019-12-06 12:11:56 +01:00
0e8b95f4bf Docker command updated
Docker does not allow uppercase letters and throws this error:

`docker: invalid reference format: repository name must be lowercase.`
2019-12-06 16:30:37 +05:30
5b8344cfc3 Merge pull request #373 from curquiza/stop-words-deletion
Use POST instead of DELETE method to delete stop-words
2019-12-05 23:06:15 +01:00
075f4034d9 Use POST instead of DELETE method to delete stop-words 2019-12-05 18:07:56 +01:00
c616ce99a8 Merge pull request #368 from tpayet/add-push-debpkg
Add publish action to gemfury for apt pkg
2019-12-05 15:35:12 +01:00
6b9b5fda7e Add publish action to gemfury for apt pkg 2019-12-05 14:54:57 +01:00
b756fc382a Merge pull request #367 from meilisearch/support-stdin-example
Allow users to send csv files from stdin in examples
2019-12-05 12:33:18 +01:00
29fd54dcfa Allow users to send csv files from stdin in examples 2019-12-05 12:23:56 +01:00
d664e97104 Merge pull request #365 from meilisearch/update-readme
Reorder "Deploy the server" options on the README
2019-12-04 18:37:40 +01:00
4466097d44 Update readme.md; Deploy part 2019-12-04 18:16:56 +01:00
60b94d2dc1 Merge pull request #366 from tpayet/cargo-deb
Add debian package in CI
2019-12-04 18:14:10 +01:00
51636402c2 Add debian package in CI 2019-12-04 18:02:30 +01:00
fc8182d7d3 Merge pull request #363 from meilisearch/bump-version
Bump meilisearch crates to v0.8.4
2019-12-03 17:30:31 +01:00
4f87465f18 Bump meilisearch crates to v0.8.4 2019-12-03 17:22:45 +01:00
5f1586ae85 Merge pull request #360 from meilisearch/fix-readme-broken-links
Fix README broken links
2019-12-02 19:10:40 +01:00
8d3161a2cf Reorder README parts 2019-12-02 18:29:53 +01:00
8bc8214279 Fix README broken links
Thanks to @baptistejamin!
2019-12-02 16:45:27 +01:00
3ea5aa18a2 Merge pull request #359 from bidoubiwa/fix_wording_in_readme
Fix bad wording in readme file
2019-12-02 14:06:49 +01:00
c4845b78a9 Fix bad wording in readme file 2019-12-02 11:15:39 +01:00
530e913e2f Merge pull request #356 from tpayet/fix-port-readme
Fix port in README & Dockerfile
2019-11-29 19:21:55 +01:00
5917f212ba Fix port in README & Dockerfile 2019-11-29 18:03:54 +01:00
d2b1690191 Merge pull request #355 from tpayet/master
Update binary default settings
2019-11-29 15:47:04 +01:00
710b7ea091 Update default listening port to 7700 2019-11-29 15:25:26 +01:00
089579d835 Update default database directory to working directory 2019-11-29 15:25:26 +01:00
7780293ddb Merge pull request #354 from meilisearch/camelcase-updates-result
Fix updates formattings and namings
2019-11-29 15:19:45 +01:00
773a51e7d0 Rename 'update_type' to 'type' on EnqueuedUpdateResult 2019-11-29 15:09:48 +01:00
7923752513 Serialize updates results to camelCase 2019-11-29 15:05:54 +01:00
9a48091b21 Merge pull request #353 from meilisearch/bump-version
Bump meilisearch crates to v0.8.3
2019-11-29 14:13:37 +01:00
30cb60f679 Bump meilisearch crates to v0.8.3 2019-11-29 14:06:17 +01:00
08687d8dab Merge pull request #351 from meilisearch/status-failed-updates-status
Add status failed on UpdateStatus
2019-11-28 18:53:31 +01:00
3a90233a3d Add status failed on UpdateStatus 2019-11-28 18:41:11 +01:00
32483cae2d Merge pull request #347 from curquiza/installation-script
Add script for binary installation
2019-11-28 18:34:58 +01:00
d7f28e0260 Add script for binary installation 2019-11-28 18:34:12 +01:00
9640c2aaa6 Merge pull request #349 from meilisearch/bump-version
Bump meilisearch crates to v0.8.2
2019-11-28 17:23:40 +01:00
9a2b4d08e1 Bump meilisearch crates to v0.8.2 2019-11-28 17:15:13 +01:00
e91615fe59 Merge pull request #348 from meilisearch/replace-isahc-by-ureq
Replace isahc by ureq
2019-11-28 17:14:32 +01:00
aed02b2e19 Remove many dependencies from the Dockerfile 2019-11-28 17:04:01 +01:00
83ad80d9db Replace isahc by ureq 2019-11-28 16:41:42 +01:00
abdb7793fb Merge pull request #345 from tpayet/readme_changes
Clarification of readme file
2019-11-28 16:35:44 +01:00
387eb3fde3 Clarification of readme file 2019-11-28 16:28:25 +01:00
e640bc90b4 Merge pull request #343 from meilisearch/explicit-index-clear
Change the update loop to be more explicit on index clear
2019-11-28 14:48:37 +01:00
3978378152 Merge pull request #344 from tpayet/patch-1
Update README license badge
2019-11-28 14:35:50 +01:00
61e3e4f0b9 Update README license badge 2019-11-28 14:28:30 +01:00
1def56ea11 Change the update loop to be more explicit on index clear 2019-11-27 13:43:28 +01:00
6d686ac14f Merge pull request #342 from meilisearch/update-lock
Update the lock file
2019-11-27 12:49:47 +01:00
641e0d15f5 Make sure the lock file is up to date 2019-11-27 12:06:14 +01:00
71b39426c0 Update the lock file 2019-11-27 12:01:22 +01:00
57584eaccc Merge pull request #341 from meilisearch/bump-version
Bump meilisearch crates to v0.8.1
2019-11-27 11:54:39 +01:00
f6fb31c531 Bump meilisearch crates to v0.8.1 2019-11-27 11:47:27 +01:00
0cea8ce5b5 Merge pull request #340 from meilisearch/separate-updates-kvstore
Separate the update and main databases
2019-11-27 11:39:14 +01:00
d08b76a323 Separate the update and main databases
We used the heed typed transaction to make it safe (https://github.com/Kerollmops/heed/pull/27).
2019-11-27 11:29:06 +01:00
86a87d6032 Merge pull request #339 from tpayet/action-docker-tag
Update action workflow for docker tagged image
2019-11-26 19:17:19 +01:00
e534929f80 Update action workflow for docker tagged image 2019-11-26 18:18:51 +01:00
fcc154da1c Merge pull request #336 from meilisearch/rename-to-meilisearch
Rename MeiliDB into MeiliSearch
2019-11-26 14:06:01 +01:00
00d1200704 Rename the meilisearch-http binary into meilisearch 2019-11-26 11:17:30 +01:00
7cc096e0a2 Rename MeiliDB into MeiliSearch 2019-11-26 11:12:30 +01:00
58eaf78dc4 Merge pull request #335 from tpayet/github-release-action
GitHub release action
2019-11-25 19:19:08 +01:00
3be2281483 Update workflows README 2019-11-25 18:14:21 +01:00
cc06d96993 Add gh actions to release binaries 2019-11-25 17:27:15 +01:00
93c7e700bc Merge pull request #333 from tpayet/update-dockerfile
Add meilihttp_addr env variable in docker build
2019-11-25 16:41:52 +01:00
97c6757fc7 Add meilihttp_addr env variable in docker build 2019-11-25 16:30:07 +01:00
276d3f8e22 Merge pull request #332 from meilisearch/jemalloc-only-on-linux
Make jemalloc only used on linux
2019-11-25 16:13:54 +01:00
4869a88ae2 Make jemalloc only used on linux 2019-11-25 15:35:13 +01:00
ae88bc31bc Merge pull request #331 from meilisearch/enable-jemalloc-linux-only
Enable jemalloc only on linux OSs
2019-11-25 14:59:56 +01:00
8aed1d96c5 Enable jemalloc only on linux OSs 2019-11-25 14:51:47 +01:00
c93949474c Merge pull request #330 from tpayet/fix-actions-badge-link
Update action badge link
2019-11-25 13:51:07 +01:00
8cf19f1c6b Update action badge link 2019-11-25 13:44:20 +01:00
a82ecb3cef Merge pull request #324 from tpayet/gh-actions
Replace Azure CI by Github Actions
2019-11-25 13:31:15 +01:00
04c2b37d82 Remove Azure CI
Add gh actions for cargo check using rust nightly

Add readme about actions workflows

Add basic Dockerfile

Add action workflow for docker publish

Change check action to test action

Update workflow readme without rust nightly

Rename test action file

Add gh actions to push latest docker image from master

Update github action for publish docker image

Add 2 steps dockerfile based on alpine

Update readme badges to match new CI
2019-11-25 13:20:54 +01:00
ab3e8d6537 Merge pull request #314 from meilisearch/fix-number-ord
Fix the ordering functions of the Number type
2019-11-22 15:14:05 +01:00
fd185a5e6b Add a test for the SortByAttr criterion 2019-11-22 15:04:23 +01:00
d9678f0040 Fix the ordering functions of the Number type 2019-11-22 14:44:02 +01:00
840217b111 Merge pull request #321 from meilisearch/fix-create-index
Fix index creation
2019-11-22 14:10:05 +01:00
9605a2cd88 Make it possible to use a custom uid and simplify the usage 2019-11-22 14:01:00 +01:00
0f86ccc035 Index UID generation makes sure to not generate the same number 2019-11-22 14:01:00 +01:00
b3b73e2276 Merge pull request #323 from meilisearch/fix-index-deletion
Fix index deletion once again
2019-11-22 14:00:19 +01:00
f241c999ad Make the CI use rust stable 2019-11-22 13:47:29 +01:00
d4d2a2303a Fix a typo on timeout_ms used for multi index search 2019-11-22 13:47:29 +01:00
c8832409ad Fix the dead lock on index deletion once again 2019-11-22 13:47:29 +01:00
98f76aa952 Merge pull request #320 from meilisearch/send-amplitude-events
Add an Amplitude analysis loop tick
2019-11-22 10:52:29 +01:00
4236632af6 Add an amplitude analysis loop tick 2019-11-21 20:28:58 +01:00
e2c98244ec Merge pull request #313 from meilisearch/fix-dead-lock
Fix dead locks when deleting indexes
2019-11-21 12:42:40 +01:00
c1cf67c008 Join updates threads after dropping the indexes lock and avoid deadlocks 2019-11-21 12:01:46 +01:00
4abea919b2 Merge pull request #311 from meilisearch/add-index-name-and-id
Add index name and change some routes request body & response
2019-11-21 11:59:14 +01:00
d60aa722c0 Allow to update expireAt and revoked on token 2019-11-21 11:49:49 +01:00
055368acd8 Fix for review 2019-11-21 11:49:49 +01:00
7f2e5d091a Rename routes /synonym to /synonyms 2019-11-20 15:33:42 +01:00
c69ae8154f Allow to receive schema update formatted as SchemaBuilder 2019-11-20 15:25:34 +01:00
cd95b243bb Add the update index route 2019-11-20 15:00:06 +01:00
1f1cb1f501 Rename browse_documents into get_all_documents and always respond HTTP Ok 2019-11-20 14:18:21 +01:00
530738cfe9 Format code 2019-11-20 14:12:12 +01:00
878dd6912e Return a HTTP 401 instead of 404 if token is not found 2019-11-20 14:06:56 +01:00
5f0f699f37 Move route to clear all synonyms on DELETE /synonyms 2019-11-20 14:03:55 +01:00
ca13900699 Make async routes return an ACCEPTED status code response 2019-11-20 14:03:19 +01:00
cc97889b37 Adding stop-words is now a PATCH method 2019-11-20 13:56:43 +01:00
45ded0498b Format code with cargo fmt 2019-11-20 11:45:23 +01:00
d01a3944c1 Add last_update information on global /stats route 2019-11-20 11:45:22 +01:00
a0caf0d6d7 Remove unused result response on indexes_uids function 2019-11-20 11:45:22 +01:00
e22debb994 Update index updated_at information at each update callback 2019-11-20 11:45:22 +01:00
1b8df0ed8b Remove last_update from stats 2019-11-20 11:45:22 +01:00
3286a5213c Move fields frequency from common store to index main store 2019-11-20 11:45:22 +01:00
394976d330 Update list_index route to return all index information, not only the list of uids 2019-11-20 11:45:22 +01:00
b95acbece0 Function generate_uid now returns a lowercased uid 2019-11-20 11:45:22 +01:00
c94f4dff71 Do not return update_id on IndexCreateResponse if it's none 2019-11-20 11:45:22 +01:00
e6465f4ea1 Create a new specific route for schema 2019-11-20 11:45:22 +01:00
2b3c91aabd Update get_index_schema to allow raw response 2019-11-20 11:45:22 +01:00
e97e13ce9f Rename index_name to index_uids 2019-11-20 11:45:22 +01:00
39e2b73718 Add updatedAt on main index store 2019-11-20 11:45:22 +01:00
a90facaa41 Rename index_name by index_uid 2019-11-20 11:45:22 +01:00
5527457655 Rewrite create_index route new path, body request and response 2019-11-20 11:45:21 +01:00
076e781810 Add name, created_at and updated_at information into main index 2019-11-20 11:45:21 +01:00
750d336018 Bump Cargo.lock meili versions 2019-11-20 11:45:21 +01:00
e8251ad45b Merge pull request #310 from meilisearch/unify-crates-version
Unify the crates versions to 0.8.0
2019-11-20 11:05:54 +01:00
963ca1e2c7 Unify the crates versions to 0.8.0 2019-11-20 10:47:32 +01:00
12a6c7d54d Merge pull request #298 from bidoubiwa/add_ranked_movies_dataset
Create a dataset where the release_date is a numeric timestamp
2019-11-20 10:46:24 +01:00
2d0fc3f9d3 Create a dataset where the release_date is a numeric timestamp 2019-11-20 10:44:32 +01:00
e554784527 Merge pull request #309 from bidoubiwa/remove_stop_words_from_settings
Removed stop words from settings route
2019-11-19 18:35:27 +01:00
2cb43fa638 Removed stop words from settings route 2019-11-19 18:21:44 +01:00
66d5309a51 Merge pull request #308 from meilisearch/improve-structopt
Introduce better argument names
2019-11-19 18:09:44 +01:00
7eeedec7eb Bump meilidb-http to v0.3.0 2019-11-19 17:50:01 +01:00
4b798c71ae Introduce new arguments and understand env vars 2019-11-19 17:50:01 +01:00
685016bfec Bump meilidb-core to v0.7.0 and meilidb-http to v0.2.0 2019-11-18 15:49:23 +01:00
d30e5f6231 Merge pull request #299 from meilisearch/default-update-callbacks
Prefer using a global update callback common to all indexes
2019-11-18 15:05:21 +01:00
e854d67a55 Remove useless routes and checks 2019-11-18 14:41:49 +01:00
23a89732a5 Prefer using a global update callback common to all indexes 2019-11-18 14:41:49 +01:00
3a1f41ebdb Merge pull request #305 from meilisearch/fix-example
Make it easier to interact with compacted databases
2019-11-17 20:31:06 +01:00
f873761a27 Make it easier to interact with compacted databases 2019-11-17 20:01:02 +01:00
ebf620c7f9 Merge pull request #302 from meilisearch/fix-dataset-schema
Rename the movies dataset schema file
2019-11-17 17:17:33 +01:00
8b92bc3421 Rename the movies dataset schema file 2019-11-17 16:45:13 +01:00
70a5aa61e9 Merge pull request #301 from meilisearch/separate-types
Move the main types to a separate library
2019-11-17 12:45:25 +01:00
a76169042f Make the serde and zerocopy meilidb-types dependencies optional 2019-11-17 12:30:39 +01:00
c9c3cfcee9 Move the main types to a separate library 2019-11-17 12:19:36 +01:00
2e60ac5359 Merge pull request #300 from meilisearch/update-dependencies
Do not use a forked fst dependency
2019-11-17 12:19:08 +01:00
2dd7751e09 Disable the fst MemMap feature 2019-11-17 11:43:00 +01:00
26bdabcdec Do not use a forked fst dependency 2019-11-17 11:14:01 +01:00
fc8c7ed77e Merge pull request #297 from meilisearch/improve-highlights
Improve the highlight formatted outputs
2019-11-15 14:28:27 +01:00
521c96354f Improve the highlight formatted outputs 2019-11-15 14:16:21 +01:00
9788779894 Merge pull request #296 from meilisearch/update-readme
Update the README
2019-11-14 21:32:32 +01:00
9b965764ab Update the README 2019-11-14 19:09:04 +01:00
9a5a543311 Merge pull request #290 from curquiza/deploy-doc
Add information to the Deploy Server part of the documentation
2019-11-13 16:06:27 +01:00
b18fb868e8 Add information to the Deploy Server part of the documentation 2019-11-13 15:37:21 +01:00
c734af55c0 Merge pull request #289 from curquiza/status204-delete-index
Change the HTTP status code on index deletion
2019-11-13 15:33:27 +01:00
810b328ad2 Change the HTTP status code on index deletion 2019-11-13 15:14:23 +01:00
0a8039d8d8 Merge pull request #285 from bidoubiwa/remove_catching_same_index_creation
Change the error catching on the index creation route
2019-11-13 15:13:51 +01:00
e51704c09a Remove the error catching on the index creation route when the index already exists 2019-11-13 14:42:59 +01:00
623a9012d5 Merge pull request #279 from bidoubiwa/new_slogan_and_resume
Slogan and Resume proposition
2019-11-13 14:41:21 +01:00
b9a185634f Slogan and Resume proposition 2019-11-13 14:31:22 +01:00
b46889b5f0 Merge pull request #282 from meilisearch/fix-ci-artifacts
Add the meilidb-http binary to the artifacts
2019-11-13 11:39:00 +01:00
ef9a0c07db Add the meilidb-http binary to the artifacts 2019-11-13 11:15:39 +01:00
3a6f3947c9 Merge pull request #281 from meilisearch/fix-attributes-to-search-in
Take attributes to search in into account
2019-11-12 18:45:40 +01:00
5c5f41d755 Take attributes to search in into account 2019-11-12 18:35:58 +01:00
6803a8fad0 Merge pull request #280 from meilisearch/format-updates-json
Format updates json
2019-11-12 18:35:25 +01:00
8e4b362e4d Fixed the display of enqueued updates 2019-11-12 18:21:59 +01:00
acb5e624c6 Add enqueued and processed datetimes 2019-11-12 18:21:59 +01:00
a98949ff1d Improve updates JSON format 2019-11-12 16:57:22 +01:00
f355280250 Merge pull request #278 from meilisearch/mit-license
Change the license to an MIT one
2019-11-12 14:35:32 +01:00
cee8d6a8d9 Change the license to an MIT one 2019-11-12 14:24:28 +01:00
27326ea069 Merge pull request #277 from bidoubiwa/add_cmd_to_compile
Add cmd line to compile binary
2019-11-12 13:55:54 +01:00
7bbe5aca5b Add cmd line to compile binary 2019-11-12 10:57:03 +01:00
1c4afe6d0f Merge pull request #276 from meilisearch/support-slash-tokenizer
Add support for back/slashes
2019-11-11 21:46:14 +01:00
2d8f9a9849 Add support for back/slashes 2019-11-11 21:23:08 +01:00
3f41681b18 Merge pull request #274 from meilisearch/enable-env-logger
Add env logger to enable logging
2019-11-11 19:13:33 +01:00
64791815fa Add env logger to enable logging 2019-11-11 19:03:38 +01:00
8a36571a74 Merge pull request #272 from meilisearch/fix-long-words
Ignore words that are too long
2019-11-10 20:07:22 +01:00
d18e775bec Ignore words that are too long 2019-11-10 17:44:27 +01:00
78381f1818 Merge pull request #271 from meilisearch/update-dependencies
Update Dependencies
2019-11-10 11:17:09 +01:00
7f33a01ae1 Update dependencies 2019-11-10 11:04:56 +01:00
d07d14d33a Update crossbeam-channel to 0.4.0 2019-11-10 11:03:22 +01:00
540d7886ab Merge pull request #266 from meilisearch/update-readme
Update the readme and add a Quick Start section
2019-11-09 13:21:22 +01:00
5a5d10af52 Add an image description of the gif 2019-11-09 13:12:01 +01:00
f95d077ef8 Improve the README a little bit by adding a quick start section 2019-11-09 13:12:01 +01:00
05dd99936f Add a gif to show a demo using crates.io 2019-11-09 12:59:39 +01:00
c086625773 Merge pull request #269 from meilisearch/repo-became-binary
Make the repository be a binary and version the Cargo.lock
2019-11-09 12:58:52 +01:00
dc17bebf4a Make the repository be a binary and version the Cargo.lock 2019-11-09 12:13:28 +01:00
026464b2e4 Bump meilidb-core to v0.6.5 2019-11-06 11:52:34 +01:00
bd42158a70 Merge pull request #264 from meilisearch/index-soft-deletion
Index soft deletion
2019-11-06 11:51:50 +01:00
df066f4321 Introduce a new add or update documents PUT route 2019-11-06 11:42:41 +01:00
69832e8c70 Update the http index deletion route 2019-11-06 11:42:41 +01:00
95eb6ad09a Add a test to check index soft deletion works correctly 2019-11-06 11:02:30 +01:00
f3fc0bed45 Introduce index soft deletion 2019-11-06 11:02:30 +01:00
5dd6b697b9 Bump meilidb-core to v0.6.4 2019-11-05 18:46:16 +01:00
b7d170c7d1 Merge pull request #262 from meilisearch/fix-unidecoded-emojis
Fix a highlighting problem
2019-11-05 17:04:35 +01:00
7541172d12 Make the example show highlighted areas more explicitly 2019-11-05 16:40:48 +01:00
85bf5d113c Fix a highlighting problem when the query was longer than the original text 2019-11-05 16:40:34 +01:00
89fd397903 Bump meilidb-core to v0.6.3 2019-11-05 15:40:04 +01:00
d8392f2f18 Merge pull request #261 from meilisearch/partial-updates
Introduce the support of partial updates
2019-11-05 15:39:02 +01:00
36b74f0efe Introduce partial updates to the update system 2019-11-05 15:23:41 +01:00
68c0a36b00 Make the deserialization correctly support optional documents 2019-11-05 15:03:18 +01:00
a127b72a74 Merge pull request #259 from meilisearch/allow-add-schema-attributes-at-end
Allow to introduce attributes only at the end of a schema
2019-11-05 12:34:11 +01:00
5782fb9e52 Test adding attributes only at the end of a schema 2019-11-05 12:09:52 +01:00
20319f7974 Allow to introduce attributes only at the end of a schema 2019-11-05 12:09:52 +01:00
c4087e2ec2 Merge pull request #258 from meilisearch/debug-schema
Implement a better debug for the schema
2019-11-05 11:35:02 +01:00
b1d1f2f627 Implement a better debug system for the schema 2019-11-05 11:21:07 +01:00
62fe6a8263 Merge pull request #257 from meilisearch/bump-version
Bump meilidb-core/tokenizer versions
2019-11-04 17:26:01 +01:00
d88c10f3b4 Bump meilidb-tokenizer to v0.6.1 2019-11-04 17:17:06 +01:00
00f49990c7 Bump meilidb-core to v0.6.2 2019-11-04 17:16:50 +01:00
89f30ad47e Merge pull request #256 from meilisearch/fix-tokenizer
Fix the tokenizer to make it work with unicode chars
2019-11-04 17:15:17 +01:00
3b1cbed238 Check that the unidecoded words are not empty 2019-11-04 17:03:11 +01:00
4571b80a49 Update the tests 2019-11-04 16:41:58 +01:00
de2b8672d4 Make the tokenizer understand strange whitespaces/quotes 2019-11-04 16:41:58 +01:00
ccded7b429 Improve the indexer to not deunicode before indexing
Revert of #179
2019-11-04 16:41:58 +01:00
1d4e98410a Merge pull request #255 from meilisearch/bump-version
Bump meilidb-core to v0.6.1
2019-11-04 14:47:53 +01:00
e493b27ef1 Bump meilidb-core to v0.6.1 2019-11-04 14:22:08 +01:00
70589c136f Merge pull request #253 from meilisearch/fix-updates-system
Fix the updates system
2019-11-04 13:46:37 +01:00
1c3620a7d4 Add tests to the update system 2019-11-04 13:18:07 +01:00
c2cc0704d7 Clean up the update_awaiter function 2019-11-04 11:11:58 +01:00
2a50e08bb8 Moving to heed v0.5.0 2019-11-04 10:49:27 +01:00
6b326a45d7 Fix the update system to always consume updates even if failing 2019-10-31 17:44:13 +01:00
b73874bf24 Merge pull request #252 from meilisearch/examples-specify-index-name
Allow users to specify the index name to use with examples bins
2019-10-31 17:02:00 +01:00
95c8ad0f80 Allow users to specify the index name to use with examples bins 2019-10-31 16:20:31 +01:00
996763cc52 Merge pull request #251 from meilisearch/update-heed
Moving to heed 0.3.0
2019-10-31 16:20:07 +01:00
6a8171d335 Moving to heed 0.3.0 2019-10-31 16:11:02 +01:00
2f32586dab Merge pull request #250 from meilisearch/new-http-server
Introduce a brand new HTTP server
2019-10-31 16:07:52 +01:00
db898001eb Get rid of rust-crypto and uuid 2019-10-31 15:28:37 +01:00
c2a12b661a Make it a runnable server 2019-10-31 15:27:21 +01:00
f51c49db93 Introduce the HTTP tide based library 2019-10-31 15:02:34 +01:00
1be5b0f327 Bump the meili-core/schema/tokenizer crates to 0.6.0 2019-10-31 14:05:59 +01:00
a136c62208 Merge pull request #249 from meilisearch/display-all-updates
Display enqueued along with processed updates
2019-10-31 13:53:46 +01:00
cc461b1331 Display enqueued along with processed updates 2019-10-31 12:25:52 +01:00
dbe5363672 Merge pull request #248 from meilisearch/fix-highlight-too-long
Correctly highlight when query string is too long
2019-10-30 18:19:06 +01:00
45d4361e7d Correctly highlight when query string is longer 2019-10-30 17:49:50 +01:00
b28c44cc6b Merge pull request #247 from meilisearch/bump-meilidb
Bump the meili-core/schema/tokenizer crates to 0.5.11
2019-10-30 17:48:26 +01:00
b709a7a30a Bump the meili-core/schema/tokenizer crates to 0.5.11 2019-10-30 17:40:31 +01:00
64c25bdb40 Merge pull request #246 from meilisearch/better-highlighting-area
Make the highlight system much better
2019-10-30 17:39:12 +01:00
c230f244be Make the highlight system much better 2019-10-30 17:32:29 +01:00
02af4ff113 Merge pull request #245 from meilisearch/reindex-all-documents-reduce-memory-usage
Reduce the RAM consumption when re-indexing all the documents
2019-10-29 17:54:47 +01:00
4dff8a215e Reduce the RAM consumption when re-indexing all the documents 2019-10-29 17:46:23 +01:00
41065305aa Merge pull request #244 from meilisearch/reintroduce-stop-words
Reintroduce stop words
2019-10-29 16:35:03 +01:00
e9dce3ce81 Add a test to ensure that the indexer supports stop words 2019-10-29 16:18:06 +01:00
ff7dde7522 Make the RawIndexer support stop words 2019-10-29 16:18:06 +01:00
a226fd23c3 Introduce the stop words deletion update type 2019-10-29 16:18:06 +01:00
776673ebae Introduce the stop words addition update type 2019-10-29 15:24:09 +01:00
32d2cc3aea Merge pull request #243 from meilisearch/all-updates-results
Introduce a function to get all updates results
2019-10-29 11:45:55 +01:00
8a17fcdda5 Introduce a function to get all updates results 2019-10-29 11:37:40 +01:00
9602d7a960 Merge pull request #242 from meilisearch/accept-dup-documents
Make documents additions accept only the last duplicate document
2019-10-28 20:52:40 +01:00
ac12a4b9c9 Make documents additions accept only the last duplicate document 2019-10-28 20:40:33 +01:00
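A minimal sketch of the "last duplicate wins" rule this commit describes, assuming an illustrative Document type rather than the actual meilidb types:

use std::collections::HashMap;

#[derive(Debug, Clone)]
struct Document {
    id: u64,
    body: String,
}

/// When a batch contains the same document id several times,
/// keep only the last occurrence (last write wins).
fn dedup_keep_last(batch: Vec<Document>) -> Vec<Document> {
    let mut by_id = HashMap::new();
    for doc in batch {
        by_id.insert(doc.id, doc); // a later duplicate overwrites the earlier one
    }
    by_id.into_values().collect() // note: insertion order is not preserved here
}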
af96050944 Merge pull request #241 from meilisearch/fix-dead-locks
Fix deadlocks
2019-10-28 18:20:01 +01:00
a43b37dfc1 Send channel notification when clearing documents 2019-10-28 17:58:22 +01:00
c08dcac1d4 Abort the update transaction before calling the update callback 2019-10-28 17:55:43 +01:00
a17dccd84e Merge pull request #237 from meilisearch/fix-exactness-criterion
Fix the exactness criterion algorithm
2019-10-26 18:43:10 +02:00
9a57cab3ee Fix the exactness criterion algorithm 2019-10-26 18:34:40 +02:00
751b060320 Merge pull request #238 from meilisearch/improve-highlighting
Only highlight query word areas, not the whole words
2019-10-26 18:23:19 +02:00
4111b99a6d Only highlight query word areas, not the whole words 2019-10-26 15:56:34 +02:00
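Roughly, the change is to record the matched area inside a word rather than the whole word. A sketch with assumed names, using byte offsets and a simple prefix match:

/// A highlighted area, as byte offsets into the original text.
#[derive(Debug, PartialEq)]
struct Highlight {
    start: usize,
    length: usize,
}

/// Highlight only the part of `word` that the query word matched
/// (here a prefix match), not the whole word. `word_start` is the
/// byte offset of `word` in the original text.
fn highlight_area(word: &str, query_word: &str, word_start: usize) -> Option<Highlight> {
    word.starts_with(query_word)
        .then(|| Highlight { start: word_start, length: query_word.len() })
}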
d6fb2b56d1 Merge pull request #236 from meilisearch/reorder-automatons
Make sure that automaton groups with more automatons rank better
2019-10-24 15:29:16 +02:00
cb5c77e536 Make sure that automaton groups with more automatons rank better 2019-10-24 15:18:53 +02:00
44c89b1ea2 Merge pull request #235 from meilisearch/readme-concat-split-query-words
Add information about search concat and split query words support
2019-10-23 18:20:59 +02:00
26a285053b Add information about search concat and split query words support 2019-10-23 18:19:15 +02:00
1446a6a2d2 Merge pull request #234 from meilisearch/clear-all-update-variant
Introduce a clear all documents update
2019-10-23 16:45:37 +02:00
047eba3ff3 Introduce a clear all documents update 2019-10-23 16:39:10 +02:00
8d9d183ce6 Merge pull request #233 from meilisearch/commit-when-update-ok
Commit an update only when it is Ok
2019-10-23 16:07:48 +02:00
eb67195840 Commit an update only when it is Ok 2019-10-23 15:52:40 +02:00
93306c2326 Merge pull request #232 from meilisearch/support-splitted-words
Support split words
2019-10-23 13:38:16 +02:00
7d9cf8d713 Clean up the fetch algorithm 2019-10-23 12:06:21 +02:00
03eb7898e7 Introduce a basic working version of phrase query for splitting words 2019-10-23 11:40:13 +02:00
0fbd4cd632 Merge pull request #231 from meilisearch/recursive-object-indexing
Make it possible to convert recursive objects into strings
2019-10-22 16:20:10 +02:00
858bf359b8 Make it possible to convert recursive objects into strings 2019-10-22 16:02:02 +02:00
5dc8465ebd Merge pull request #181 from meilisearch/diff-schema
Make it possible to update an index schema
2019-10-22 14:23:43 +02:00
0f30a221fa Introduce the reindex_all_documents indexing function 2019-10-22 14:07:27 +02:00
e86a547e93 Introduce a basic schema diff function 2019-10-21 17:57:32 +02:00
32d8b4b83f Merge pull request #230 from meilisearch/moving-to-heed
Move to heed 0.1.0
2019-10-21 13:34:06 +02:00
78535b3e33 Move to heed 0.1.0 2019-10-21 12:05:53 +02:00
6c9a238973 Merge pull request #229 from meilisearch/cargo-fmt-clippy
Cargo pass of fmt and clippy
2019-10-18 13:50:30 +02:00
cf5e228288 Update the CI to check the fmt and clippy 2019-10-18 13:33:38 +02:00
9dce41ed6b Cargo clippy pass 2019-10-18 13:30:06 +02:00
ca26a0f2e4 Cargo fmt pass 2019-10-18 13:30:06 +02:00
47d777c8f7 Merge pull request #228 from meilisearch/copy-and-compact-db
Introduce a function to copy and compact a database env
2019-10-18 13:21:55 +02:00
2ef51f7df9 Introduce a function to copy and compact a database env 2019-10-18 12:56:56 +02:00
2d7db2a80f Merge pull request #227 from meilisearch/damerau-distance-cost-1
Make the Levenshtein algorithm consider transpositions to cost 1
2019-10-18 10:46:42 +02:00
526202ec8b Make the Levenshtein algorithm consider transpositions to cost 1 2019-10-17 18:07:15 +02:00
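For context, this is the classic Damerau (optimal string alignment) variant of Levenshtein: an adjacent-character swap costs a single edit instead of an insert plus a delete. A self-contained sketch, not the crate's actual implementation:

/// Optimal-string-alignment distance: like Levenshtein, but a
/// transposition of two adjacent characters also costs 1.
fn damerau_levenshtein(a: &str, b: &str) -> usize {
    let a: Vec<char> = a.chars().collect();
    let b: Vec<char> = b.chars().collect();
    let (n, m) = (a.len(), b.len());
    let mut d = vec![vec![0usize; m + 1]; n + 1];
    for i in 0..=n { d[i][0] = i; }
    for j in 0..=m { d[0][j] = j; }
    for i in 1..=n {
        for j in 1..=m {
            let cost = if a[i - 1] == b[j - 1] { 0 } else { 1 };
            d[i][j] = (d[i - 1][j] + 1)        // deletion
                .min(d[i][j - 1] + 1)          // insertion
                .min(d[i - 1][j - 1] + cost);  // substitution
            if i > 1 && j > 1 && a[i - 1] == b[j - 2] && a[i - 2] == b[j - 1] {
                d[i][j] = d[i][j].min(d[i - 2][j - 2] + 1); // transposition costs 1
            }
        }
    }
    d[n][m]
}

fn main() {
    assert_eq!(damerau_levenshtein("hte", "the"), 1); // one swap, one edit
}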
86ab729356 Merge pull request #226 from meilisearch/fix-rotxn-number-documents
Use a read-only transaction to retrieve the number of documents
2019-10-17 17:39:56 +02:00
dd74af4c70 Use an RoTxn to retrieve the number of documents 2019-10-17 17:30:54 +02:00
b79a8457f9 Merge pull request #225 from meilisearch/improve-query-builder-pattern
Rework the QueryBuilder to make it easier to construct and use
2019-10-17 15:59:38 +02:00
d941c512db Rework the QueryBuilder to make it easier to construct and use 2019-10-17 14:45:21 +02:00
0ff73039e5 Merge pull request #224 from meilisearch/improve-automaton-producer
Improve the automaton producer
2019-10-17 13:51:44 +02:00
2ea3e9b081 Improve the automaton producer quality by changing the production order 2019-10-17 13:19:08 +02:00
da71821204 Make the example take the fetch-timeout-ms argument into account 2019-10-17 13:19:08 +02:00
16f0914f09 Merge pull request #223 from meilisearch/fix-update-serialization
Fix updates serialization to use serde_json instead of bincode
2019-10-17 13:05:25 +02:00
1cf6afad9a Fix updates serialization to use serde_json instead of bincode 2019-10-17 12:31:46 +02:00
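The switch trades bincode's compactness for serde_json's self-describing payloads, which are easier to evolve and debug. A sketch of the round trip with an illustrative Update type (not the real one):

use serde::{Deserialize, Serialize};

/// Illustrative stand-in for the real update payload.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Update {
    id: u64,
    kind: String,
}

/// Serialize and deserialize an update with serde_json instead of bincode;
/// the JSON bytes are self-describing, so schema evolution is more forgiving.
fn roundtrip(update: &Update) -> serde_json::Result<Update> {
    let bytes = serde_json::to_vec(update)?;
    serde_json::from_slice(&bytes)
}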
261c21b057 Merge pull request #222 from meilisearch/update-readme
Update the README
2019-10-16 18:22:09 +02:00
925a22b644 Update the README 2019-10-16 18:04:45 +02:00
dc5c42821e Merge pull request #221 from meilisearch/zerocopy-lmdb
Moving to zerocopy-lmdb
2019-10-16 17:27:21 +02:00
1667e1b32f Move to zerocopy-lmdb 2019-10-16 17:12:55 +02:00
c332c7bc70 Merge pull request #220 from meilisearch/all-documents-fields-iter
Introduce an Iterator to visit all documents attributes counts
2019-10-15 15:42:30 +02:00
5e8d432614 Introduce an Iterator to visit all documents attributes counts 2019-10-15 15:27:18 +02:00
f6282ca031 Merge pull request #219 from meilisearch/current-update-id
Introduce an Index method to retrieve the currently processed update
2019-10-15 15:26:22 +02:00
3278d22279 Introduce an Index method to retrieve the currently processed update 2019-10-15 14:54:52 +02:00
c9618793e3 Merge pull request #218 from meilisearch/update-readme
Change the README to refer to LMDB instead of RocksDB
2019-10-15 11:40:10 +02:00
1ef785a9ef Change the README to refer to LMDB instead of RocksDB 2019-10-15 11:39:49 +02:00
fdc98f9ef3 Merge pull request #217 from meilisearch/improve-exactness-criterion
Improve the exactness criterion
2019-10-15 11:37:33 +02:00
0de37819b4 Simplify the document fields counts deletion 2019-10-15 11:17:23 +02:00
9ff92c5d15 Update the exact criterion to use the documents fields counts 2019-10-14 18:48:54 +02:00
e629f51af4 Use the documents_fields_counts store in the QueryBuilder 2019-10-14 18:48:32 +02:00
b377003192 Compute and store the number of words in documents fields 2019-10-14 14:07:10 +02:00
a7e40a78c1 Introduce the DocumentsFieldsCounts store 2019-10-14 14:06:34 +02:00
9cdda8c46a Make the RawIndexer index_text method return the number of words 2019-10-14 13:56:52 +02:00
b7ea812dcc Merge pull request #216 from meilisearch/get-ride-of-messagepack
Get rid of rust messagepack (rmp)
2019-10-11 16:41:37 +02:00
710ab2386c Get rid of rust messagepack (rmp) 2019-10-11 16:17:37 +02:00
81bf6d583d Merge pull request #214 from meilisearch/add-customs-updates
Add customs updates
2019-10-11 15:42:08 +02:00
02575a2ef6 Introduce customs updates 2019-10-11 15:33:35 +02:00
da6ab2753e Rename Update/Type SchemaUpdate into Schema 2019-10-11 13:49:17 +02:00
97de72de83 Merge pull request #213 from meilisearch/do-not-commit-ourselves
Do not commit updates, let the user do it
2019-10-11 11:51:51 +02:00
12b80e08be Do not commit updates, let the user do it 2019-10-11 11:29:47 +02:00
4b130fa2e5 Merge pull request #212 from meilisearch/fix-documents-ids-iter
Fix the DocumentsIdsIter and do not iter on an Option
2019-10-10 18:43:01 +02:00
9dca18f966 Fix the DocumentsIdsIter and do not iter on an Option 2019-10-10 18:32:22 +02:00
543b65b09b Merge pull request #211 from meilisearch/fix-documents-deletion-generic-param
Remove the useless generic documents_deletion parameter
2019-10-10 17:09:49 +02:00
9eb27811b1 Remove the useless generic documents_deletion parameter 2019-10-10 16:16:53 +02:00
7c3d93e5da Merge pull request #210 from meilisearch/query-builder-with-criteria
Add a method to create a query builder along with criteria
2019-10-10 15:40:56 +02:00
485480560a Add method to create a query builder along with criterion 2019-10-10 15:32:08 +02:00
0ac927794a Merge pull request #209 from meilisearch/rename-main-to-common-index
Rename main_store into common_store
2019-10-10 15:31:25 +02:00
e09d3b654d Rename main_store into common_store 2019-10-10 15:22:23 +02:00
c5af5de4f0 Merge pull request #208 from meilisearch/improve-open-or-create-index
Create two open and create index functions
2019-10-10 13:59:08 +02:00
19c22a8c5e Create two open and create index functions 2019-10-10 13:48:30 +02:00
0103c7bfd9 Merge pull request #207 from meilisearch/improve-documents-ids-iter
Improve the DocumentsIdsIter internal
2019-10-10 13:48:13 +02:00
7b26bd88c0 Improve the DocumentsIdsIter internal 2019-10-10 13:40:18 +02:00
da0168bd82 Merge pull request #206 from meilisearch/get-documents-ids
Introduce the DocumentsIds iterator
2019-10-10 10:54:21 +02:00
d1e59be46b Introduce the DocumentsIds iterator 2019-10-10 10:35:57 +02:00
9774db6011 Merge pull request #205 from meilisearch/expose-types
Expose the UpdateType
2019-10-10 10:35:42 +02:00
46c19dfc5a Expose the UpdateType 2019-10-10 10:24:41 +02:00
9ed6752573 Merge pull request #204 from meilisearch/optional-query-builder-timeout
Make the QueryBuilder timeout setting optional and pass the tests
2019-10-09 18:17:52 +02:00
d8fdad1455 Make the QueryBuilder timeout setting optional and pass the tests 2019-10-09 17:59:31 +02:00
f56636e1e9 Merge branch 'moving-to-lmdb' 2019-10-09 17:23:48 +02:00
03599f1fc9 Reintroduce the deep-dive and typos-ranking-rules explanations documents 2019-10-09 16:57:27 +02:00
be78ecbf9a Update the README to recall about LMDB 2019-10-09 16:55:07 +02:00
ba2b04ca89 Update ci with rust nightly only 2019-10-09 16:47:25 +02:00
121399f336 Add a movies example dataset to the repository 2019-10-09 16:46:11 +02:00
3fded51534 Update the README file to reflect the current repository 2019-10-09 16:46:11 +02:00
8f63ec39da Unrestrict static lifetime of Criterion names 2019-10-09 16:15:31 +02:00
5a1c1aeb02 Reintroduce the sort-by-attr criterion 2019-10-09 16:08:30 +02:00
6ec575f8de Use a buffered sync channel to avoid blocking the update system 2019-10-09 15:49:35 +02:00
683b6afbfb Introduce a way to filter documents with a basic syntax 2019-10-09 14:20:37 +02:00
663714bb6d Make the example return documents field in a consistent order 2019-10-09 13:48:33 +02:00
bb35ca0d40 Reintroduce the distinct and filtering of documents 2019-10-09 13:44:18 +02:00
5f3072e67e Support a basic update callback system 2019-10-09 11:45:19 +02:00
2a4707d51e Expose a function to be able to know the status of an update 2019-10-08 17:35:47 +02:00
6534a9ec1d Clean up many warning messages 2019-10-08 17:31:07 +02:00
0a5ad4db06 Move the push update functions to their related modules 2019-10-08 17:24:11 +02:00
6ee0d72c7b Expose the synonyms operation updates on the Index 2019-10-08 17:18:22 +02:00
ba32ce21d0 Introduce synonyms deletions updates 2019-10-08 17:16:48 +02:00
0e224efa46 Introduce synonyms additions updates 2019-10-08 17:06:56 +02:00
175461c13a Port all tests to the TempDatabase struct 2019-10-08 16:16:30 +02:00
c514692233 Introduce the TempDatabase in the QueryBuilder tests 2019-10-08 15:22:36 +02:00
d8d0442d63 Fix many indexing and searching related bugs 2019-10-08 14:56:14 +02:00
2236ebbd42 Introduce an example file to test indexing and searching csv documents 2019-10-08 14:48:48 +02:00
0bfba3e4ba Introduce a query_builder method on Index 2019-10-07 17:55:46 +02:00
a57a64823e Make it possible to create an index and add a schema later on 2019-10-07 17:48:26 +02:00
aa05459e4f Introduce a background thread that manages pending updates 2019-10-07 16:16:04 +02:00
0615c5c52d Consume updates in the order of insertion 2019-10-07 15:00:28 +02:00
487411340a Prefix all the store names to avoid colliding with main stores 2019-10-07 10:56:55 +02:00
5139dc7f3e Let the caller commit/abort the operation 2019-10-07 10:52:45 +02:00
88d0d3931c Store the schema in the main index 2019-10-04 17:49:13 +02:00
df2ef8d2e1 Introduce update_task, popping an update and pushing the result of it 2019-10-04 17:49:13 +02:00
29229b2137 Remove the update from the database when popped out 2019-10-04 17:16:34 +02:00
851cc38216 Introduce the Database struct to manage indexes 2019-10-04 16:49:17 +02:00
effbbc7370 Load the indexes at startup 2019-10-04 13:26:33 +02:00
08e3f23408 Add the meilidb-schema/tokenizer projects 2019-10-04 10:29:44 +02:00
62a0aefe44 Make the project be a workspace 2019-10-04 10:26:32 +02:00
3476939b7e Prefer using the impl syntax 2019-10-04 10:21:09 +02:00
38e474deaf Introduce the MResult type 2019-10-03 17:33:15 +02:00
00c70d3cb5 Make the UpdatesResults store work 2019-10-03 16:54:37 +02:00
af9fd9f552 Make the Updates store work 2019-10-03 16:39:30 +02:00
0a731973b9 Make many stores do their jobs 2019-10-03 16:13:14 +02:00
c4bd13bcdf Introduce many SingleStore wrappers 2019-10-03 15:04:11 +02:00
a5bfbf244c Introduce the documents Deserializer 2019-10-03 11:49:13 +02:00
39e0d9fc4a Introduce a basically working rkv based MeiliDB 2019-10-02 17:35:18 +02:00
905bc5c1a6 Initial commit 2019-10-02 17:35:05 +02:00
0f395d43a0 Merge pull request #201 from meilisearch/updates-ids-api
Add more methods for updates process
2019-09-26 16:08:22 +02:00
0b5b7b0bf1 feat: add a method to get the current processed update id & next updates in queue 2019-09-26 15:50:16 +02:00
57dd679026 Merge pull request #199 from meilisearch/fix-soft-hard-separator
Do not consider underscores and middle dashes as hard separators
2019-09-24 23:09:38 +02:00
cdd69290c3 test: Make the tests work with new separator limits 2019-09-24 20:49:42 +02:00
175b3dcb75 fix: Do not consider underscores and middle dashes as hard separators 2019-09-24 20:14:20 +02:00
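A sketch of the soft/hard separator distinction this fix tunes, with an assumed character classification (the real rules live in meilidb-tokenizer): soft separators only split words, hard separators also split phrase areas.

#[derive(Debug, PartialEq)]
enum Separator {
    Soft, // splits words but keeps them in the same area
    Hard, // splits areas, like a dot does
}

/// After this fix, '_' and '-' count as soft separators instead of hard ones.
/// The exact character sets here are illustrative assumptions.
fn classify(c: char) -> Option<Separator> {
    match c {
        ' ' | '\'' | '_' | '-' => Some(Separator::Soft),
        '.' | ';' | ',' | '!' | '?' | '(' | ')' => Some(Separator::Hard),
        _ => None,
    }
}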
ca818e12a9 Merge pull request #198 from meilisearch/split-by-underscore
Support underscores and colons as split characters
2019-09-24 14:16:02 +02:00
6b9426a051 feat: Support underscore as a split character 2019-09-24 13:56:32 +02:00
cee5e50857 Merge pull request #197 from meilisearch/log-info-to-trace
Change logs in query_builder from info! to trace!
2019-09-24 13:48:46 +02:00
3fe346101b chore: change logs in query_builder from info! to trace! 2019-09-24 13:35:46 +02:00
87e5998489 Merge pull request #194 from meilisearch/set-code-public
Set code public
2019-09-19 18:25:13 +02:00
d7d1b6ff02 chore: reformat tests 2019-09-19 18:08:25 +02:00
7073b42afa feat: get update status Enqueued / Processed / Unknown 2019-09-19 18:08:14 +02:00
120d209e66 chore: set public SchemaProps values 2019-09-19 12:43:36 +02:00
62e981c6b8 chore: set public the main duration on update status 2019-09-19 12:43:36 +02:00
941302a4be chore: export ranked map 2019-09-19 12:43:36 +02:00
20f423268e chore: re-export database::Error type 2019-09-19 12:43:36 +02:00
522013425b chore: export a getter for synonyms 2019-09-19 12:43:35 +02:00
e3c413759f chore: implement deref on CommonIndex 2019-09-19 12:43:35 +02:00
6ed97d1c19 chore: re-export UpdateType/DetailedDuration/UpdateStatus 2019-09-19 12:43:35 +02:00
53ad1fc068 chore: split tests into multiples files 2019-09-19 12:43:35 +02:00
1e2ef06c5c Merge pull request #196 from meilisearch/fix-cf-handle-creation
Create the Column Family only when it doesn't already exist
2019-09-19 12:29:50 +02:00
9db86f13f3 fix: Only create the Column Family when it doesn't already exist 2019-09-19 12:02:34 +02:00
369461e635 Merge pull request #195 from meilisearch/update-readme
Update the README
2019-09-19 12:01:09 +02:00
d2d22ac76d doc: Update the README and refer to examples instead of the main binary 2019-09-19 12:00:34 +02:00
a5a19fc9dd Merge pull request #193 from meilisearch/get-documents-id
Add a method to get an iterator over all documents ids
2019-09-18 16:09:30 +02:00
a36c991897 feat: add a method to get an iterator over all documents ids 2019-09-18 15:41:06 +02:00
4f71219e17 Merge pull request #192 from meilisearch/bump-dependencies
Bump dependencies
2019-09-18 15:10:15 +02:00
69e0bae75e chore: Bump dependencies 2019-09-18 14:42:23 +02:00
1b18679950 Merge pull request #191 from meilisearch/typed-settings
Typed settings
2019-09-18 14:04:07 +02:00
e1c119b5a8 chore: add test for custom settings 2019-09-18 12:22:26 +02:00
03709910fd feat: add typed index custom settings for common uses 2019-09-18 12:22:21 +02:00
8fdb330195 Merge pull request #190 from meilisearch/bump-dependencies-versions
Bump dependency
2019-09-18 10:29:22 +02:00
59ae6458dc chore: bump dependencies 2019-09-17 18:50:44 +02:00
c10b701b9a Merge pull request #189 from meilisearch/documents-fields-repartition
Add the documents fields repartition into stats
2019-09-17 16:23:49 +02:00
80caa8b60d feat: add the documents fields repartition into stats 2019-09-17 15:56:13 +02:00
97cf5cca2a Merge pull request #188 from meilisearch/delete-index
Delete an index
2019-09-17 14:25:38 +02:00
3e76dc718b feat: delete an index and all its associated data 2019-09-17 13:29:56 +02:00
5a17b5a63b Merge pull request #187 from meilisearch/export-snapshots
Re-export rocksdb snapshot function
2019-09-17 12:54:14 +02:00
5bc5185ac5 feat: re-export rocksdb snapshot function 2019-09-17 11:37:17 +02:00
3712fa7c24 Merge pull request #186 from meilisearch/common-db-tree
feat: expose a common DB tree for the database
2019-09-16 19:08:52 +02:00
918cc235a4 feat: expose a common DB tree for the database 2019-09-16 16:05:05 +02:00
8d24e54fa1 Merge pull request #185 from meilisearch/serde-schema
Implement De/Serialize on schema
2019-09-16 15:18:02 +02:00
35b7b58ff7 feat: Remove the Schema to/from_toml/json/bin methods 2019-09-16 14:50:38 +02:00
ffc29a319f feat: Implement De/Serialize on schema 2019-09-16 14:50:37 +02:00
ba3ac5ea7b chore: Create an internal Schema::to_builder method 2019-09-16 14:50:37 +02:00
ee6a54fe4c feat: Replace the linked-hash-map dependency by indexmap 2019-09-16 14:50:37 +02:00
f6ff79085e Merge pull request #184 from meilisearch/unify-update-types
Unify the Update and UpdateOwned types
2019-09-16 14:00:12 +02:00
bcd38c7d5a feat: Unify the Update and UpdateOwned types 2019-09-16 12:33:08 +02:00
aaeb25828f Merge pull request #183 from meilisearch/number-of-documents
Compute the number of documents on updates
2019-09-14 16:32:18 +02:00
af26c39482 test: Improve the tests of the number-of-documents counting 2019-09-14 15:29:46 +02:00
2006259a23 feat: Improve the number-of-documents counting 2019-09-14 15:26:41 +02:00
707e2f4d77 feat: Update the number of documents in the KV 2019-09-14 15:26:39 +02:00
8d8aed36a8 feat: Count the number of deleted/inserted documents 2019-09-14 15:24:39 +02:00
2658ef0176 Merge pull request #182 from meilisearch/replace-sled-by-rocksdb
Replace sled by RocksDB
2019-09-14 11:32:26 +02:00
400d542fef feat: Update the README to reflect the kv store update 2019-09-12 16:28:23 +02:00
f46868407c feat: Make RocksDB work seamlessly like sled 2019-09-05 18:43:10 +02:00
e3fa07077c feat: Introduce the CfTree and CfIter types 2019-09-05 14:53:09 +02:00
e5763e73eb chore: Prefer using const names to avoid typos 2019-09-05 13:22:53 +02:00
fd880e0a0e Merge pull request #175 from meilisearch/moving-back-to-sled
Moving back to sled
2019-09-05 13:14:48 +02:00
e33cc89846 feat: Introduce update callbacks 2019-09-05 11:48:26 +02:00
f40b373f9f feat: Introduce the UpdateStatus type 2019-09-05 11:48:26 +02:00
cd8535d410 feat: Introduce the update_status/_blocking functions 2019-09-05 11:48:25 +02:00
f07b99fe97 fix: Make the tests work with the new update system 2019-09-05 11:48:25 +02:00
f45a00df3b fix: Cloned ArcSwaps are unsynchronized versions 2019-09-05 11:46:02 +02:00
cd864c40bc feat: Make the update serialization be based on message pack 2019-09-05 11:46:02 +02:00
91b44a2759 chore: Change the Box<Error> to be marked dyn 2019-09-05 11:46:01 +02:00
d8cd8c5def chore: Move the updates in their own module 2019-09-05 11:46:01 +02:00
b0be06540a chore: Simplify the update application 2019-09-05 11:46:01 +02:00
4deee93a55 feat: Introduce synonyms deletion using the update system 2019-09-05 11:33:11 +02:00
451c0a6d03 feat: Introduce synonyms addition using the update system 2019-09-05 11:33:10 +02:00
0db3e6c58c feat: Introduce documents deletion using the update system 2019-09-05 11:33:10 +02:00
f83d6df4ef feat: Introduce documents addition using the update system 2019-09-05 11:33:10 +02:00
5a9e25c315 feat: Introduce the UpdatesIndex type 2019-09-05 11:14:11 +02:00
50e3c2c3de chore: Upgrade the meilidb-data dependencies 2019-09-05 10:49:46 +02:00
093ee9732f Merge pull request #180 from meilisearch/store-every-document
Change the STORED attribute property to DISPLAYED
2019-09-04 14:45:00 +02:00
333189ee51 fix: Change every stored schema property to displayed 2019-09-04 11:16:36 +02:00
50b8a66794 feat: Change the STORED attribute property to DISPLAYED 2019-09-03 11:14:20 +02:00
8be3fc1a66 Merge pull request #179 from meilisearch/deunicode-before-tokenize
Improve the tokenizer by splitting after deunicode
2019-09-02 17:20:30 +02:00
b5503989f9 feat: Improve the tokenizer by splitting after deunicode 2019-09-02 16:54:54 +02:00
5b8bc09826 Merge pull request #176 from meilisearch/no-more-hanging-threads
Replace the rayon::scope by always checking time
2019-09-01 20:02:03 +02:00
c8ee21f227 feat: Replace the rayon::scope by always checking time 2019-09-01 18:52:38 +02:00
a420fbf1e8 Merge pull request #174 from meilisearch/arc-fst-sets
Do not clone probably large fst::Sets, Arc them
2019-08-30 14:52:28 +02:00
ca34c28335 feat: Do not clone probably large fst::Sets, Arc them 2019-08-30 14:37:28 +02:00
3e1b81c4ce Merge pull request #173 from meilisearch/fix-ranked-map-set
Use the right ranked-map key name
2019-08-30 14:21:14 +02:00
9b353dfda6 chore: Use const names to avoid typos 2019-08-30 12:36:10 +02:00
d8dcc6f34b fix: Use the right ranked-map key name 2019-08-30 12:21:00 +02:00
fba1272a3e Merge pull request #172 from meilisearch/expose-internal-functions
Expose some internal functions
2019-08-29 15:26:42 +02:00
e20a038970 fix: Expose some internal functions 2019-08-29 15:11:51 +02:00
6f34dccc89 Merge pull request #171 from meilisearch/stringify-document-id
Transform identifiers fields into a string before hashing it
2019-08-29 13:42:46 +02:00
f5b0eb044a fix: Transform the identifier value into a string before hashing it 2019-08-29 11:41:20 +02:00
bae86e978e Merge pull request #170 from meilisearch/async-word-index-fetching-with-rayon-scope
Async word index fetching with rayon scope
2019-08-28 14:37:38 +02:00
8030a822ab test: Add a way to set up the fetch timeout of the query-database example 2019-08-28 13:42:20 +02:00
9c5ec110e5 feat: Introduce a way to enable or disable query timeouts 2019-08-28 13:24:34 +02:00
67302d09f3 feat: Multiword rewrite while there is time 2019-08-19 11:12:23 +02:00
7dc9ea78fa feat: Make the automaton DFA construction lazy 2019-08-19 11:12:23 +02:00
0ee56314fb feat: Try to simplify Store trait bound with a rayon scope 2019-08-19 11:10:54 +02:00
b7b60b5fe5 feat: Introduce a new thread to avoid waiting on doc indexes fetches 2019-08-16 16:35:19 +02:00
d9c9fafd78 feat: Fetch doc indexes while there is time 2019-08-16 15:01:25 +02:00
bb0a79c577 feat: Process automatons in the order they were sorted 2019-08-16 12:25:35 +02:00
81d44a0854 feat: Order automatons by importance 2019-08-16 12:19:34 +02:00
ebc95cb8f2 feat: Display the documents fields in the order they were declared 2019-08-16 11:25:42 +02:00
a488c00a2e feat: Use RustyLine in the query-database example 2019-08-16 11:25:42 +02:00
bf3c2c3725 feat: Move the multi-word rewriting algorithm into its own function 2019-08-16 11:25:42 +02:00
89df496f0c feat: Separate highlights from matches to make the code easier to follow 2019-08-16 11:25:42 +02:00
9959f2e952 feat: Move the RawDocument type to its own module 2019-08-16 11:25:42 +02:00
795557c046 feat: Remove query splitting from the automaton generation 2019-08-16 11:25:42 +02:00
225a3bf184 test: Produce tests that work with the new cumulative word index system 2019-08-16 11:25:42 +02:00
e65d7418b7 feat: Remove the query index from the Automaton type 2019-08-16 11:25:42 +02:00
f478bbf826 feat: Introduce the QueryEnhancer in the query synonym system 2019-08-16 11:25:42 +02:00
5e691c2140 feat: Introduce the QueryEnhancer type 2019-08-16 11:25:42 +02:00
e0cadaa68d Merge pull request #165 from meilisearch/reorder-schema-attributes
Reorder schema attributes
2019-07-01 16:12:33 +02:00
9175e4686b feat: Collect TmpMatches only in tests, producing data useful for testing 2019-07-01 14:55:47 +02:00
e8afca614c chore: Little clean ups of meilidb-core 2019-07-01 14:34:06 +02:00
4f4b630ae9 fix: Make the examples compile with the new Highlight type 2019-07-01 12:06:17 +02:00
6b6db2f8e6 feat: Introduce the Highlight type to simplify the data oriented design 2019-07-01 12:06:16 +02:00
b7ed22bc59 feat: Introduce on the fly attributes reordering with meilidb-core 2019-07-01 12:03:31 +02:00
97cc3c7cce Merge pull request #166 from meilisearch/split-query-words
Split query words
2019-06-28 18:30:13 +02:00
f5d52396f5 feat: Support query word splits 2019-06-28 18:04:35 +02:00
9cc154da05 chore: Rewrite tests to use iterators and be easily testable 2019-06-28 18:04:35 +02:00
5aa49d232c feat: Rewrite Automaton generation related code 2019-06-28 18:04:35 +02:00
1cb42cbb30 Merge pull request #164 from meilisearch/concat-query-words
Support query words concatenation
2019-06-28 18:03:49 +02:00
9f320590d3 feat: Support query words concatenation 2019-06-27 10:14:17 +02:00
1b0fd2e0ba Merge pull request #160 from meilisearch/synonyms
Support all types of synonyms
2019-06-26 14:59:45 +02:00
b249b2a81b feat: Support removing specific synonym alternatives 2019-06-26 10:45:51 +02:00
0a5d4eb7ed feat: Normalize synonym strings and query strings to search for synonyms 2019-06-26 10:45:51 +02:00
3dcbc737f3 feat: Make synonyms not be considered exact matches 2019-06-26 10:45:51 +02:00
43f11e929d fix: Do not trigger a synonym when it's not the last word and is a prefix 2019-06-26 10:45:51 +02:00
8f2a551cca feat: Trigger synonym replacement only when the last word is typed 2019-06-26 10:45:50 +02:00
8f044c6853 fix: Only create non-prefix DFA when generating synonyms alternatives 2019-06-26 10:45:50 +02:00
a76c00a787 feat: Create types to edit synonyms and keep them in the database 2019-06-26 10:45:50 +02:00
0633f16b4d feat: Make multi-word support multi-word synonyms 2019-06-26 10:45:50 +02:00
59fafb8b30 feat: Support one word having multi-word alternatives 2019-06-26 10:45:50 +02:00
d2bd99cc2a fix: Append DocIndexes when building InMemorySetStore from an Iterator 2019-06-26 10:45:50 +02:00
62930ecc4e feat: Deduplicate automatons when synonyms produce duplicated ones 2019-06-26 10:45:49 +02:00
6cb57aa8a4 feat: Make unique words with multi-word synonyms basically work 2019-06-26 10:45:49 +02:00
9861c3878e tests: Add more tests about synonyms 2019-06-26 10:45:49 +02:00
707d7b062b feat: Make the query handle synonyms via the Store 2019-06-26 10:45:49 +02:00
18736bdcd0 feat: Introduce the synonyms concept to the Store trait 2019-06-26 10:45:49 +02:00
e8b2e86007 feat: Introduce a basic way to handle synonyms 2019-06-26 10:45:48 +02:00
ae8b4f56f2 Merge pull request #163 from meilisearch/export-compute-docid
Expose a function to compute the DocumentId from a Hashable value
2019-06-25 12:25:38 +02:00
28a0074497 feat: Expose a function to compute the DocumentId from a Hashable value 2019-06-25 11:21:12 +02:00
71c039db09 Merge pull request #162 from meilisearch/trustful-hash
Prefer using a reliable SipHash to compute document ids
2019-06-22 11:51:52 +02:00
15646c258b fix: Prefer using a reliable SipHash to compute document ids 2019-06-22 11:22:21 +02:00
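The point is to derive document ids from a hash with well-understood, stable behavior. A sketch using std's SipHash-based DefaultHasher; the real commit presumably pins a concrete SipHash implementation, since std does not guarantee its algorithm across releases:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Compute a document id by hashing the string form of the identifier.
/// DefaultHasher is SipHash-based but unspecified; production code should
/// pin an explicit SipHash so ids stay stable across builds.
fn document_id(identifier: &str) -> u64 {
    let mut hasher = DefaultHasher::new();
    identifier.hash(&mut hasher);
    hasher.finish()
}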
25a5605b35 Merge pull request #161 from meilisearch/remove-tide
Remove tide as it breaks compilation on the latest nightly
2019-06-18 14:04:47 +02:00
b630e32c6a fix: Remove tide as it breaks compilation on the latest nightly 2019-06-18 13:40:46 +02:00
c39254bf98 Merge pull request #159 from meilisearch/create-specific-schema-crate
Move the Schema to its own workspace crate
2019-06-03 09:17:14 +02:00
994a0e78f1 feat: Move the Schema to its own workspace crate 2019-05-29 15:37:28 +02:00
ab2ca15c5c Merge pull request #158 from meilisearch/moving-back-to-rocksdb
Moving back to RocksDB
2019-05-29 14:56:55 +02:00
07f447c457 feat: Force RocksDB compaction 2019-05-28 17:38:59 +02:00
62c8f1ba04 feat: Fix the index opening when the index already exists 2019-05-26 11:36:47 +02:00
e08edc2d6b feat: Introduce some stats to ease debugging 2019-05-25 12:12:24 +02:00
a147c09b06 feat: Make more functions accessible on the custom settings 2019-05-24 14:37:04 +02:00
9fca74443e feat: Wrap the database index access to improve usability 2019-05-24 14:26:05 +02:00
6f258f71d5 feat: Implement some convenient accessors for custom settings 2019-05-23 15:43:41 +02:00
ce61c16dbe feat: Disable all the default RocksDB compression features 2019-05-23 15:35:53 +02:00
4c973238a1 feat: Introduce a basic RocksDB based version 2019-05-23 14:57:29 +02:00
3a8da82792 Merge pull request #157 from meilisearch/update-readme
Fix some badly spelled sentences
2019-05-22 14:01:33 +02:00
f10da122ff doc: Fix some badly spelled sentences 2019-05-22 11:41:03 +02:00
ec20a8cacb Merge pull request #156 from meilisearch/clippy-pass
Do a little clippy pass
2019-05-22 11:33:55 +02:00
102fb506db chore: Do a little clippy pass 2019-05-22 11:00:58 +02:00
34ba520f44 Merge pull request #155 from meilisearch/update-sdset
Use the safest SetBuf constructor instead of new_unchecked
2019-05-21 18:23:39 +02:00
fa099555c0 feat: Use the safest SetBuf constructor instead of new_unchecked 2019-05-21 18:15:48 +02:00
8387c5b14e Merge pull request #153 from meilisearch/example-expose-system-stats
Output more information from the examples on document injection
2019-05-21 16:50:25 +02:00
5040095228 feat: Output more information from the examples on document injection 2019-05-21 16:37:17 +02:00
788fae59a1 Merge pull request #154 from meilisearch/reintroduce-sort-by-attr
Reintroduce the `SortByAttr` custom criterion
2019-05-21 16:32:12 +02:00
e042f44e0d feat: Reintroduce the SortByAttr custom criterion 2019-05-21 16:22:23 +02:00
b1fc3e5cec Merge pull request #152 from meilisearch/documents-deletion-updates-ranked-map
Remove the documents from the ranked map on documents deletion
2019-05-21 13:59:21 +02:00
d7b1b7a2a9 feat: Remove the documents from the ranked map on documents deletion 2019-05-21 13:33:42 +02:00
97744ad24f Merge pull request #151 from meilisearch/expose-sled-compression-factor
Expose the sled compression setting
2019-05-20 15:03:43 +02:00
2e79b2a871 feat: Expose the sled compression setting 2019-05-20 14:41:15 +02:00
349f0f7068 Merge pull request #148 from meilisearch/split-fst-docindexes
Split fst doc-indexes
2019-05-20 14:24:48 +02:00
94f9587db1 feat: Implement Debug on RawDocument for more convenience 2019-05-20 11:21:41 +02:00
6df8f62022 test: Add more tests to some criteria 2019-05-20 11:21:40 +02:00
8c71473498 feat: Introduce the Criterion::name to allow better debugging 2019-05-20 11:21:40 +02:00
08d89053da feat: Introduce a simple little HTTP server for demo 2019-05-16 17:09:41 +02:00
4b36fa0739 test: Add tests about additions and deletions of documents 2019-05-16 13:44:21 +02:00
921b063a71 feat: Make the DocumentsDeletion public interface to take serde types 2019-05-16 12:04:08 +02:00
3de633c869 feat: Reexport sled to reduce user level library incompatibilities 2019-05-16 12:04:08 +02:00
021f0545eb doc: Update the deep-dive explanation text 2019-05-16 12:04:08 +02:00
b701eb85b8 doc: Update the README features links 2019-05-16 12:04:08 +02:00
4e80378a77 chore: Rename the ebay example into kaggle 2019-05-16 12:04:07 +02:00
830d2f28b9 feat: Introduce a custom tree for user custom settings 2019-05-16 12:04:07 +02:00
c5ba34d0b0 chore: Make the crate-only public interface completely public 2019-05-16 12:04:07 +02:00
2e31bb519a chore: Split the database structure internal types 2019-05-16 12:04:07 +02:00
169bd4cb39 feat: Store all documents words by document rather than by attribute 2019-05-15 15:42:13 +02:00
aa90f22865 feat: Remove the Index dependency of the Serializer 2019-05-15 15:42:12 +02:00
9bba90c47e fix: Fix a bug in the Database open-index method 2019-05-15 15:42:12 +02:00
2844cb5bca fix: Make the examples compile 2019-05-15 15:42:12 +02:00
dff81bb161 feat: Prefer set/del methods instead of set with an Option type 2019-05-15 15:42:12 +02:00
1f2abce7c3 feat: Introduce the DocumentsDeletion type 2019-05-15 15:42:11 +02:00
e67ada8823 feat: Introduce the DocumentsAddition type 2019-05-15 15:42:11 +02:00
42e39f6eb5 feat: Introduce a simplified version of the Store trait 2019-05-15 15:42:11 +02:00
f317a7a322 feat: implement open/create_index on the Database type 2019-05-15 15:42:11 +02:00
8434ecbb43 feat: Introduce the RankedMap real type 2019-05-15 15:42:10 +02:00
0c18026240 feat: Introduce Tree wrappers for each index component 2019-05-15 15:42:10 +02:00
6eb25687f8 feat: Handle word doc-indexes sled tree errors 2019-05-15 15:42:10 +02:00
737db5668b chore: Remove the WriteToBytes trait 2019-05-15 15:42:10 +02:00
f16e0333e4 chore: Remove the SharedData/Cursor types 2019-05-15 15:42:09 +02:00
27ffcaabe9 chore: Remove the DocIndexes type 2019-05-15 15:42:09 +02:00
db031a5b95 chore: Remove the DocIds type 2019-05-15 15:42:09 +02:00
2e9fbd07cd chore: Remove most of the warnings 2019-05-15 15:42:09 +02:00
74acf83464 chore: Remove the NewIndexEvent type 2019-05-15 15:42:08 +02:00
3dc057ca9c feat: Introduce the new Index system 2019-05-15 15:42:08 +02:00
e142339106 Merge pull request #150 from felixonmars/patch-1
chore: Fix some typos
2019-05-06 15:00:53 +02:00
39038750a8 chore: Fix some typos 2019-05-06 20:12:33 +08:00
f68733bf11 Merge pull request #149 from meilisearch/ci-only-nightly
Update ci with rust nightly only
2019-05-02 15:43:53 +02:00
85edb3e90c Update ci with rust nightly only 2019-05-02 11:43:45 +02:00
d7ce6d016b Merge pull request #147 from meilisearch/moving-to-sled
Make the repository a workspace and move to sled
2019-04-29 15:21:02 +02:00
9023a12ad4 feat: Introduce the unrankable error variant 2019-04-29 14:32:04 +02:00
0547671246 feat: Take ranked attributes into account 2019-04-29 14:32:04 +02:00
068f1bc202 feat: Index unidecoded words 2019-04-29 14:32:04 +02:00
7035f76077 squash-me: Make better measurements of the time spent retrieving 2019-04-29 14:32:04 +02:00
f0268d49fe fix: Always lowercase indexed tokens 2019-04-29 14:32:04 +02:00
7dbf5d6319 fix: Make the examples build 2019-04-29 14:32:03 +02:00
ed6b6038ee feat: Finalize index merging on document insertion 2019-04-29 14:32:03 +02:00
ad24ef8a25 feat: Index words of structs, maps and tuples 2019-04-29 14:32:03 +02:00
645bab7748 feat: Index documents using the Serializer struct 2019-04-29 14:32:03 +02:00
abd7d1de48 feat: Introduce the extract_document_id function 2019-04-29 14:32:03 +02:00
ea0ee070ef feat: Introduce the Serializer
Which will serialize document fields as message pack in the kv-store
2019-04-29 14:32:03 +02:00
2a69170f14 feat: Introduce the DocumentsDeletion type 2019-04-29 14:32:02 +02:00
725e7b4229 chore: Move the Deserializer into the serde module 2019-04-29 14:32:02 +02:00
187e6740bd feat: Allow users to construct query builders from database indexes 2019-04-29 14:32:02 +02:00
4b40d5b0d4 feat: Introduce the Index struct 2019-04-29 14:32:02 +02:00
ee2bad20c7 feat: Store the RankedMap into the inner sled tree 2019-04-29 14:32:02 +02:00
b7805fee93 feat: Store already opened indexes and word indexes 2019-04-29 14:32:02 +02:00
0104e93ba9 feat: Introduce index events to update the WordIndex 2019-04-29 14:32:02 +02:00
25a4961453 feat: Introduce the Indexer struct 2019-04-29 14:32:01 +02:00
7338e522bd squash-me: Add set/get/del_document_attribute to Index methods 2019-04-29 14:32:01 +02:00
58c020a2e1 feat: Store the word index into the database index 2019-04-29 14:32:01 +02:00
f7eced03fd chore: Using a fork of the fst library that supports Arc<[u8]> 2019-04-29 14:32:01 +02:00
9be7c02461 chore: Update sled to 0.22.1 2019-04-29 14:32:01 +02:00
9483f2df60 feat: Introduce a custom Error type 2019-04-29 14:32:01 +02:00
f17a05c342 feat: Introduce the RankedMap type 2019-04-29 14:32:00 +02:00
e41c551757 feat: Introduce the Number type 2019-04-29 14:32:00 +02:00
95dfbd1fe0 feat: Introduce the meilidb-data schema module 2019-04-29 14:32:00 +02:00
287d5dee4d feat: Introduce the meilidb-data workspace member 2019-04-29 14:32:00 +02:00
77405cc103 chore: Remove the database module from meilidb 2019-04-29 14:32:00 +02:00
abf7191eec feat: Make the Tokenizer able to support tokenizing sequences 2019-04-29 14:32:00 +02:00
c6bb2b6f9c chore: Make the debug symbols available for release binaries 2019-04-29 14:31:59 +02:00
acede0f3e8 fix: Correctly assert the DocIndex memory size 2019-04-29 14:31:59 +02:00
e56106cbdc chore: Update the toml dependency 2019-04-29 14:31:59 +02:00
87f9528791 feat: Use the new Tokenizer 2019-04-29 14:31:59 +02:00
397522f277 feat: Move meilidb example into the meilidb workspace 2019-04-29 14:31:59 +02:00
a745819ddf feat: Simplify the Tokenizer to use the LinearStrGroupBy type 2019-04-29 14:31:37 +02:00
5d5bcf7011 feat: Remove the FilterFunc alias type 2019-04-29 14:31:37 +02:00
19e67dcf0b feat: Move query splitting into the tokenizer workspace 2019-04-29 14:31:37 +02:00
1897da5348 feat: Move tokenizer things into the meilidb-tokenizer workspace 2019-04-29 14:31:37 +02:00
d8cbb03c42 chore: Update the .gitignore file 2019-04-29 14:31:36 +02:00
bc227bef21 chore: Add a nightly feature to meilidb-core 2019-04-29 14:31:36 +02:00
3bcb1dc802 chore: Allow the activation of the meilidb-core i128 feature 2019-04-29 14:31:36 +02:00
d0786b4156 chore: Move the SortByAttr into meilidb 2019-04-29 14:31:36 +02:00
14790eeae3 chore: Move index related things to the meilidb-core workspace member 2019-04-29 14:31:35 +02:00
3056b351fa Merge pull request #143 from ndudnicz/examples-movies
doc: add a new +19k movies example dataset
2019-04-15 10:11:38 +02:00
52fca57114 doc: add a new +19k movies example dataset 2019-04-13 21:11:28 +02:00
ee7a570b2f doc: Fix a little typo 2019-03-24 16:45:33 +01:00
61dcf72e04 Merge pull request #131 from meilisearch/update-readme
Add a Features section to the readme
2019-03-24 16:44:00 +01:00
bace8ad510 doc: Add a features section to the readme 2019-03-24 16:28:19 +01:00
e0b759839d Merge pull request #129 from meilisearch/ci-badge
Add CI badge
2019-03-10 22:46:57 +01:00
05b0a3e7d2 Add CI badge 2019-03-10 21:38:04 +01:00
2518037b91 Merge pull request #128 from meilisearch/azure-pipeline
Azure pipeline
2019-03-10 17:38:47 +01:00
3e452f362c Replace TravisCI by Azure CI 2019-03-10 15:46:59 +01:00
4900544574 Merge pull request #126 from Kerollmops/searchable-attributes
Searchable attributes
2019-03-05 17:11:15 +01:00
858589dc6b feat: Limit the QueryBuilder to search only into some attributes 2019-03-05 16:34:29 +01:00
915f2e70a3 Merge pull request #125 from Kerollmops/limit-memory-usage
Limit memory usage
2019-03-05 16:17:56 +01:00
aae301878c fix: Flush the database after each WriteBatch injected 2019-03-05 14:55:57 +01:00
383a49b44f fix: Compact the whole database for each WriteBatch injected 2019-03-05 14:55:57 +01:00
a45cc4b618 fix: Reduce the size of the DocIndex type 2019-03-05 14:55:57 +01:00
aef7d7825f Merge pull request #124 from Kerollmops/version-bump
Bump version to 0.3.2
2019-02-25 14:22:02 +01:00
f28ce661af chore: Bump version to 0.3.2 2019-02-25 13:56:23 +01:00
74eb9c8d0f Merge pull request #122 from Kerollmops/query-builder-no-view-dep
Remove the DatabaseView dependencies from the QueryBuilder
2019-02-24 16:56:12 +01:00
d664221c64 feat: Remove the DatabaseView dependencies from the QueryBuilder 2019-02-24 16:25:28 +01:00
58bff3d4ac Merge pull request #123 from Kerollmops/update-deps
Update all the dependencies
2019-02-24 16:24:47 +01:00
2c206eb98c chore: Update all the dependencies 2019-02-24 16:00:03 +01:00
19724e5af9 Merge pull request #121 from Kerollmops/no-cjk-unidecode
Do not save unidecoded CJK kanjis
2019-02-23 22:34:47 +01:00
c9e0ad132c feat: Do not save unidecoded CJK kanjis 2019-02-23 19:11:54 +01:00
24f265a963 Merge pull request #120 from Kerollmops/custom-log10-function
Optimize the SumOfTypos criterion
2019-02-23 19:01:12 +01:00
f8a743ee00 feat: Optimize the SumOfTypos criterion 2019-02-23 18:36:45 +01:00
64971de7ed Merge pull request #119 from Kerollmops/dont-be-hurry
Fix the tokenizer (next time don't be in such a hurry to merge)
2019-02-23 17:07:42 +01:00
a960c325f3 feat: Make query strings support CJK kanjis 2019-02-23 14:57:13 +01:00
a799470997 fix: Change the tokenizer to measure CJK char positions 2019-02-22 23:06:42 +01:00
10414791a2 fix: Remove debug println from the tokenizer 2019-02-22 22:34:37 +01:00
743974e60d Merge pull request #118 from Kerollmops/tokenizer-support-kanjis
Make the Tokenizer support Kanjis
2019-02-22 20:16:55 +01:00
0e267cae4b feat: Make the Tokenizer support Kanjis 2019-02-22 19:37:19 +01:00
12a352ae2f Merge pull request #117 from Kerollmops/tokenizer-support-parentheses
Make the tokenizer support parentheses
2019-02-22 19:36:15 +01:00
5070b27728 feat: Make the tokenizer support parentheses
Interpreting them as hard punctuation (like a dot).
2019-02-22 18:18:17 +01:00
7a6b734078 Merge pull request #116 from Kerollmops/raw-field-value-getter
Allow users to retrieve the raw field value of a document
2019-02-22 18:02:46 +01:00
24823da6f7 feat: Allow users to retrieve the raw field value of a document 2019-02-22 15:30:20 +01:00
8701cb3a8f Merge pull request #115 from qdequele/database-path
Add accessor for database path and index path
2019-02-22 15:11:40 +01:00
315fc1fbe3 feat: Add accessor for database and index path 2019-02-22 13:49:04 +01:00
23833bac10 Merge pull request #114 from Kerollmops/hot-fix-ranked-attribute
Do not error when an attribute is registered for ranking
2019-02-21 23:17:10 +01:00
8235b6efc9 fix: Do not error when an attribute is registered for ranking 2019-02-21 20:14:08 +01:00
7f937eea5a Merge pull request #113 from Kerollmops/hot-fix-query-builder
Remove the QueryBuilder boxed criteria default static restriction
2019-02-21 20:11:10 +01:00
a1cf634ac1 feat: Remove the QueryBuilder boxed criteria default static restriction 2019-02-21 19:26:22 +01:00
c86472e997 Merge pull request #112 from Kerollmops/bump-version
Bump version to 0.3.1
2019-02-21 15:18:37 +01:00
26cb398a6f chore: Bump version to 0.3.1 2019-02-21 14:52:40 +01:00
f6e664d298 Merge pull request #111 from qdequele/config
Add a config per index
2019-02-21 14:39:37 +01:00
9437cecf87 chore: Use Default derive on Config struct 2019-02-21 14:01:55 +01:00
13309511b3 chore: Use serde derive lowercase on RankingOrdering 2019-02-21 14:01:55 +01:00
1941cb16c0 feat: Add Config.update_with(_) method to merge two configs 2019-02-21 14:01:55 +01:00
55823c5d5d feat: add admin key on config 2019-02-21 14:01:55 +01:00
4721da1679 feat: Add access key on config 2019-02-21 14:01:55 +01:00
482f750231 chore: Set config field pub 2019-02-21 14:01:55 +01:00
d5119db165 feat: Allow to retrieve config from Database and DatabaseView 2019-02-21 14:01:55 +01:00
37578ed74f feat: store config into database 2019-02-20 14:07:19 +01:00
f5992ce822 Merge pull request #109 from Kerollmops/implement-text-cropping
Introduce text cropping that shows the first matches
2019-02-18 19:40:30 +01:00
badb0035c5 feat: Introduce text cropping that shows the first match 2019-02-18 18:59:50 +01:00
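A simplified view of this cropping behavior, assuming ASCII text for brevity (real code must respect UTF-8 char boundaries before slicing):

/// Crop `text` to a window of `context` bytes around the first match of
/// `query`, so the displayed snippet starts near something relevant.
fn crop_around_first_match<'a>(text: &'a str, query: &str, context: usize) -> &'a str {
    match text.find(query) {
        None => text,
        Some(pos) => {
            let start = pos.saturating_sub(context);
            let end = (pos + query.len() + context).min(text.len());
            &text[start..end] // would panic on a non-ASCII char boundary
        }
    }
}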
4bc14aa261 Merge pull request #108 from Kerollmops/refactor-index
Refactor the Index and Updates
2019-02-18 18:59:20 +01:00
a0c4ec0be0 feat: Introduce the updated_documents methods 2019-02-18 18:01:40 +01:00
264fffa826 feat: Replace the elapsed dependency by std::time::Instant 2019-02-17 16:37:45 +01:00
bddb37e44f feat: Move SharedData to its own module 2019-02-17 16:37:45 +01:00
6393b0cbc0 feat: Prefer binary to exponential search 2019-02-17 16:37:45 +01:00
a8df438814 feat: Implement WriteToBytes/FromSharedDataCursor 2019-02-17 16:37:44 +01:00
8014857ebf feat: Introduce the WriteToBytes trait 2019-02-17 16:37:44 +01:00
9e7261a48f feat: Introduce the FromSharedDataCursor trait 2019-02-17 16:37:44 +01:00
c4e70d0475 feat: Introduce the SharedDataCursor type 2019-02-17 16:37:44 +01:00
cbb0aaa217 feat: Introduce the Index structure along with the Events types 2019-02-17 16:36:47 +01:00
ce50e74491 Merge pull request #107 from Kerollmops/update-dependencies
Update dependencies
2019-02-13 16:05:51 +01:00
e103e1c277 chore: Replace the crossbeam::ArcCell by arc-swap::ArcSwap 2019-02-13 15:19:02 +01:00
64929fe5dc chore: Update slice-group-by to 0.2 2019-02-13 15:06:34 +01:00
b108f1e6c9 Merge pull request #106 from Kerollmops/fix-criterion
Fix the SumOfTypos and WordsProximity criteria
2019-02-12 22:06:32 +01:00
58b417e045 feat: Replace the linear_group_by by the new linear_group method 2019-02-12 21:23:36 +01:00
2e5a616d8e fix: Compute the proximity on the words with the min distance 2019-02-12 21:22:45 +01:00
092d446a7e chore: Update the slice-group-by dependency 2019-02-12 21:22:45 +01:00
85a1f126bf fix: Make the SumOfTypos criterion use a more clever algorithm 2019-02-12 21:22:42 +01:00
cf58cf86da Merge pull request #105 from Kerollmops/custom-ranking-field-into-hashmap
Save the custom ranking field into a HashMap
2019-02-11 17:36:26 +01:00
db6210c7ee feat: Introduce the Number type 2019-02-11 16:58:44 +01:00
83cd071827 feat: Introduce the SortByAttr custom ranking helper 2019-02-11 16:55:31 +01:00
084c3a95b6 feat: Add a new ranked attribute to the schema 2019-02-11 16:55:30 +01:00
78908aa34e Merge pull request #103 from Kerollmops/ranking-typo-rules
Add a reading on the default typos and ranking rules
2019-02-11 15:05:04 +01:00
cf27706f91 doc: Add a reading on the default typos and ranking rules 2019-02-11 11:58:17 +01:00
d3f53a7fd6 Merge pull request #104 from Kerollmops/update-readme
Update the Readme wrk stats
2019-02-10 14:53:15 +01:00
508af5613f doc: Update the Readme wrk stats 2019-02-10 14:05:21 +01:00
c615c31016 Merge pull request #101 from Kerollmops/version-bump
Bump version to 0.3.0
2019-02-07 15:26:38 +01:00
908b28790b chore: Bump version to 0.3.0 2019-02-07 14:51:39 +01:00
4c0279729b Merge pull request #100 from qdequele/master
Allow users to manage multiple database indexes
2019-02-07 14:49:52 +01:00
96dfac5b33 feat: Allow users to manage multiple database indexes 2019-02-07 13:05:55 +01:00
8576218b51 Merge pull request #99 from Kerollmops/simplify-transactional-update
Remove the lifetime restriction for Database Updates
2019-02-06 18:19:45 +01:00
1c1f9201b8 feat: Remove the lifetime restriction for Database Updates 2019-02-06 18:03:41 +01:00
4398b88a3a Merge pull request #98 from Kerollmops/updates-with-transactions
Change updates to be handled using the RocksDB WriteBatch feature
2019-02-06 16:13:47 +01:00
73e79f5ca4 chore: Make travis build with Rust 1.32 2019-02-06 15:58:48 +01:00
1bfd51d6e9 feat: Change updates to be handled using the RocksDB WriteBatch feature 2019-02-06 15:58:47 +01:00
0d2daf27f2 Merge pull request #97 from Kerollmops/remove-hashbrown-stop-words
Remove the hashbrown dependency for library users
2019-02-03 17:31:08 +01:00
87f0d8cf3c feat: Remove the hashbrown dependency for library users 2019-02-03 12:22:50 +01:00
06d5a10902 Merge pull request #96 from Kerollmops/chore
Make some little changes
2019-02-03 11:55:06 +01:00
94b89c5439 chore: Make the Document from_raw method private 2019-02-03 11:24:44 +01:00
c5e951be09 chore: Move the deserializer into the serde module 2019-02-03 11:24:44 +01:00
66ae5c8161 chore: Clarify some QueryBuilder comments 2019-02-03 11:24:44 +01:00
8438e2202f Merge pull request #95 from Kerollmops/fix-querybuilder-with-criteria
Make the QueryBuilder with_criteria use FilterFunc
2019-02-03 11:24:17 +01:00
7a6166d229 feat: Make the QueryBuilder with_criteria use FilterFunc 2019-02-03 10:55:16 +01:00
d46fa4b215 Merge pull request #94 from Kerollmops/data-oriented
Introduce Data Oriented design into the search algorithm
2019-02-02 15:40:10 +01:00
2bd5b4ab86 feat: Remove useless WordsProximity criterion benchmark 2019-02-02 15:12:54 +01:00
5efbc5ceb3 feat: Introduce the revisited SortBy criterion 2019-02-02 14:42:12 +01:00
2e905bac08 chore: Remove Attribute and WordArea structures 2019-02-02 14:40:15 +01:00
4c0ad5f964 feat: Simplify the Criterion Trait by removing the DatabaseView param 2019-02-02 14:40:15 +01:00
455cbf3bf4 feat: Make the search algorithm become fully data oriented 2019-02-02 14:40:14 +01:00
a3a28c56fa feat: Replace compressed Match fields by uncompressed ones 2019-02-02 14:40:14 +01:00
b0b3175641 Merge pull request #93 from Kerollmops/slice-group-by
Use the GroupBy/Mut Traits of the slice-group-by library
2019-01-30 17:52:27 +01:00
c2f0df3f73 feat: Use the GroupBy/Mut Traits of the slice-group-by library 2019-01-30 16:54:52 +01:00
820f1f9ac6 Merge pull request #91 from Kerollmops/warn-reused-document-id
Emit warnings when a document id is reused
2019-01-28 21:05:42 +01:00
337aee5b65 chore: Emit warnings when a document id is reused 2019-01-28 16:11:55 +01:00
810dfdf656 Merge pull request #90 from Kerollmops/version-bump
Bump version to 0.2.1
2019-01-25 17:08:53 +01:00
f016652fca chore: Bump version to 0.2.1 2019-01-25 16:41:08 +01:00
6c99ebe3fa Merge pull request #89 from Kerollmops/no-more-compaction
Remove the manual compaction triggering
2019-01-25 16:40:08 +01:00
94d357985f feat: Remove the manual compaction triggering 2019-01-25 16:05:56 +01:00
fbc698567a Merge pull request #87 from Kerollmops/measure-index-loading
Display index loading times
2019-01-24 14:07:11 +01:00
aa9db14c09 chore: Display index loading times 2019-01-23 11:19:44 +01:00
61e83a1c21 Merge pull request #86 from Kerollmops/measure-indexation
Display timings of indexing operations
2019-01-16 13:32:44 +01:00
1316be5b09 chore: Display timings of indexing operations 2019-01-16 11:45:33 +01:00
4e8b0383dd Merge pull request #85 from Kerollmops/debug-more-stats
Display more stats info
2019-01-15 14:20:28 +01:00
4fa10753c1 chore: Display more stats info 2019-01-14 21:18:46 +01:00
2473e289e8 Merge pull request #84 from qdequele/create-server-example
HTTP server example can use stopwords
2019-01-14 18:55:58 +01:00
e0e5e87ed3 feat: HTTP server example can use stopwords 2019-01-14 18:21:58 +01:00
b13e61f40a Merge pull request #83 from qdequele/create-server-example
Create an example of an HTTP server managing multiple databases
2019-01-14 14:35:33 +01:00
c023cb3065 feat: Create an example of an HTTP server managing multiple databases 2019-01-14 13:39:54 +01:00
0a3d069fbc Merge pull request #79 from qdequele/master
Schema can be de/serialized from a json format
2019-01-12 21:50:02 +01:00
fa062ce2cf feat: Schema can be de/serialized from a json format 2019-01-12 21:05:48 +01:00
cdc6e47bf5 Merge pull request #81 from Kerollmops/update-readme
Simplify the examples command lines
2019-01-12 13:43:42 +01:00
d5f44838be doc: Simplify the examples command lines 2019-01-12 12:56:11 +01:00
169 changed files with 162778 additions and 5671 deletions

.dockerignore (new file, 5 lines)

@ -0,0 +1,5 @@
target
Dockerfile
.dockerignore
.git
.gitignore

.github/workflows/README.md (new vendored file, 18 lines)

@ -0,0 +1,18 @@
# GitHub actions workflow for MeiliDB

> **Note:**
> - We do not use [cache](https://github.com/actions/cache) yet, but we could use it to speed up CI

## Workflow

- On each pull request, we trigger `cargo test`.
- On each tag, we build:
  - the tagged Docker image
  - the binaries for macOS, Ubuntu, and Windows
  - the Debian package
- On each stable release, we build the latest Docker image.

## Problems

- We do not test on Windows because we are unable to make it work; there is a disk space problem.


@ -0,0 +1,16 @@
name: Check if the CHANGELOG.md has been updated
on: [pull_request]
jobs:
  check:
    name: Test on ${{ matrix.os }}
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'ignore-changelog') }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Checking the CHANGELOG.md has been updated in this PR
        run: |
          set -e
          git fetch origin ${{ github.base_ref }}
          git diff --name-only origin/${{ github.base_ref }} | grep -q CHANGELOG.md

.github/workflows/publish-binaries.yml (new vendored file, 87 lines)

@ -0,0 +1,87 @@
name: Publish binaries to GitHub release
on:
  push:
    tags:
      - '*'
jobs:
  publish:
    name: Publish for ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        include:
          - os: ubuntu-latest
            artifact_name: meilisearch
            asset_name: meilisearch-linux-amd64
          - os: macos-latest
            artifact_name: meilisearch
            asset_name: meilisearch-macos-amd64
          - os: windows-latest
            artifact_name: meilisearch
            asset_name: meilisearch-windows-amd64
    steps:
      - uses: hecrj/setup-rust-action@master
        with:
          rust-version: stable
      - uses: actions/checkout@v1
      - name: Build
        run: cargo build --release --locked
      - name: Upload binaries to release
        uses: svenstaro/upload-release-action@v1-release
        with:
          repo_token: ${{ secrets.PUBLISH_TOKEN }}
          file: target/release/${{ matrix.artifact_name }}
          asset_name: ${{ matrix.asset_name }}
          tag: ${{ github.ref }}

  publish-armv7:
    name: Publish for ARMv7
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v1.0.0
      - uses: uraimo/run-on-arch-action@v1.0.7
        id: runcmd
        with:
          architecture: armv7
          distribution: ubuntu18.04
          run: |
            apt update
            apt install -y curl gcc make
            curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain stable
            source $HOME/.cargo/env
            cargo build --release --locked
      - name: Upload the binary to release
        uses: svenstaro/upload-release-action@v1-release
        with:
          repo_token: ${{ secrets.PUBLISH_TOKEN }}
          file: target/release/meilisearch
          asset_name: meilisearch-linux-armv7
          tag: ${{ github.ref }}

  publish-armv8:
    name: Publish for ARMv8
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v1.0.0
      - uses: uraimo/run-on-arch-action@v1.0.7
        id: runcmd
        with:
          architecture: aarch64 # aka ARMv8
          distribution: ubuntu18.04
          run: |
            apt update
            apt install -y curl gcc make
            curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain stable
            source $HOME/.cargo/env
            cargo build --release --locked
      - name: Upload the binary to release
        uses: svenstaro/upload-release-action@v1-release
        with:
          repo_token: ${{ secrets.PUBLISH_TOKEN }}
          file: target/release/meilisearch
          asset_name: meilisearch-linux-armv8
          tag: ${{ github.ref }}

View File

@ -0,0 +1,39 @@
name: Publish deb pkg to GitHub release & apt repository & Homebrew
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
jobs:
debian:
name: Publish debian package
runs-on: ubuntu-latest
steps:
- uses: hecrj/setup-rust-action@master
with:
rust-version: stable
- name: Install cargo-deb
run: cargo install cargo-deb
- uses: actions/checkout@v1
- name: Build deb package
run: cargo deb -p meilisearch-http -o target/debian/meilisearch.deb
- name: Upload debian pkg to release
uses: svenstaro/upload-release-action@v1-release
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: target/debian/meilisearch.deb
asset_name: meilisearch.deb
tag: ${{ github.ref }}
- name: Upload debian pkg to apt repository
run: curl -F package=@target/debian/meilisearch.deb https://${{ secrets.GEMFURY_PUSH_TOKEN }}@push.fury.io/meilisearch/
homebrew:
name: Bump Homebrew formula
runs-on: ubuntu-latest
steps:
- uses: mislav/bump-homebrew-formula-action@v1
with:
formula-name: meilisearch
env:
COMMITTER_TOKEN: ${{ secrets.HOMEBREW_COMMITTER_TOKEN }}

View File

@ -0,0 +1,19 @@
---
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
name: Publish latest image to Docker Hub
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Publish to Registry
uses: elgohr/Publish-Docker-Github-Action@master
with:
name: getmeili/meilisearch
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}

View File

@ -0,0 +1,20 @@
---
on:
push:
tags:
- '*'
name: Publish tagged image to Docker Hub
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Publish to Registry
uses: elgohr/Publish-Docker-Github-Action@master
with:
name: getmeili/meilisearch
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
tag_names: true

25
.github/workflows/test.yml vendored Normal file
View File

@ -0,0 +1,25 @@
---
on: [pull_request]
name: Test binaries with cargo test
jobs:
check:
name: Test on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
steps:
- uses: actions/checkout@v1
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
args: --locked --release

9
.gitignore vendored
View File

@ -1,7 +1,8 @@
/rocksdb
/target
/Cargo.lock
**/*.rs.bk
meilisearch-core/target
**/*.csv
**/*.json_lines
**/*.rdb
**/*.rs.bk
/*.mdb
/query-history.txt
/data.ms

View File

@ -1,22 +0,0 @@
language: rust
cache: cargo
branches:
only:
- master
matrix:
fast_finish: true
include:
# Test crates on their minimum Rust versions.
- rust: 1.31.0
name: "meilidb on 1.31.0"
script: ./ci/meilidb.sh
# Test crates on nightly Rust.
- rust: nightly
name: "meilidb on nightly"
script: ./ci/meilidb.sh

19
CHANGELOG.md Normal file
View File

@ -0,0 +1,19 @@
## v0.10.1
- Add support for floating points in filters (#640)
- Add '@' character as tokenizer separator (#607)
- Add support for filtering on arrays of strings (#611)
## v0.10
- Refined filtering (#592)
- Add the number of hits in search result (#541)
- Add support for aligned crop in search result (#543)
- Sanitize the content displayed in the web interface (#539)
- Add support for nested null, boolean, and seq values (#571, #568, and #574)
- Fixed the core benchmark (#576)
- Publish ARMv7 and ARMv8 binaries on releases (#540 and #581)
- Fixed a bug where the result of the update status after the first update was empty (#542)
- Fixed a bug where stop words were not handled correctly (#594)
- Fix CORS issues (#602)
- Support wildcard on attributes to retrieve, highlight, and crop (#549, #565, and #598)

2597
Cargo.lock generated Normal file

File diff suppressed because it is too large

View File

@ -1,55 +1,11 @@
[package]
edition = "2018"
name = "meilidb"
version = "0.2.0"
authors = ["Kerollmops <renault.cle@gmail.com>"]
[dependencies]
bincode = "1.0"
byteorder = "1.2"
crossbeam = "0.6"
fst = "0.3"
hashbrown = { version = "0.1", features = ["serde"] }
lazy_static = "1.1"
levenshtein_automata = { version = "0.1", features = ["fst_automaton"] }
linked-hash-map = { version = "0.5", features = ["serde_impl"] }
log = "0.4"
sdset = "0.3"
serde = "1.0"
serde_derive = "1.0"
unidecode = "0.3"
[dependencies.toml]
git = "https://github.com/Kerollmops/toml-rs.git"
features = ["preserve_order"]
rev = "0372ba6"
[dependencies.rocksdb]
git = "https://github.com/pingcap/rust-rocksdb.git"
rev = "306e201"
[dependencies.group-by]
git = "https://github.com/Kerollmops/group-by.git"
rev = "5a113fe"
[features]
default = ["simd"]
i128 = ["bincode/i128", "byteorder/i128"]
portable = ["rocksdb/portable"]
simd = ["rocksdb/sse"]
nightly = ["hashbrown/nightly", "group-by/nightly"]
[dev-dependencies]
csv = "1.0"
elapsed = "0.1"
env_logger = "0.6"
jemallocator = "0.1"
quickcheck = "0.8"
rand = "0.6"
rand_xorshift = "0.1"
structopt = "0.2"
tempfile = "3.0"
termcolor = "1.0"
[workspace]
members = [
"meilisearch-core",
"meilisearch-http",
"meilisearch-schema",
"meilisearch-tokenizer",
"meilisearch-types",
]
[profile.release]
debug = true

27
Dockerfile Normal file
View File

@ -0,0 +1,27 @@
# Compile
FROM alpine:3.10 AS compiler
RUN apk update --quiet
RUN apk add curl
RUN apk add build-base
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
WORKDIR /meilisearch
COPY . .
ENV RUSTFLAGS="-C target-feature=-crt-static"
RUN $HOME/.cargo/bin/cargo build --release
# Run
FROM alpine:3.10
RUN apk update --quiet
RUN apk add libgcc
COPY --from=compiler /meilisearch/target/release/meilisearch .
ENV MEILI_HTTP_ADDR 0.0.0.0:7700
CMD ./meilisearch

View File

@ -1,6 +1,6 @@
MIT License
Copyright (c) 2018 Clément Renault
Copyright (c) 2019-2020 Meili SAS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

212
README.md
View File

@ -1,60 +1,188 @@
# MeiliDB
[![Build Status](https://travis-ci.org/Kerollmops/MeiliDB.svg?branch=master)](https://travis-ci.org/Kerollmops/MeiliDB)
[![dependency status](https://deps.rs/repo/github/Kerollmops/MeiliDB/status.svg)](https://deps.rs/repo/github/Kerollmops/MeiliDB)
[![License](https://img.shields.io/github/license/Kerollmops/MeiliDB.svg)](https://github.com/Kerollmops/MeiliDB)
[![Rust 1.31+](https://img.shields.io/badge/rust-1.31+-lightgray.svg)](
https://www.rust-lang.org)
A _full-text search database_ using a key-value store internally.
It uses [RocksDB](https://github.com/facebook/rocksdb) as the internal key-value store. The key-value store allows us to handle updates and queries with small memory and CPU overheads.
You can [read the deep dive](deep-dive.md) if you want more information on the engine; it describes the whole process of generating updates and handling queries.
We will be proud if you submit issues and pull requests. You can help this project grow and start contributing by checking [issues tagged "good-first-issue"](https://github.com/Kerollmops/MeiliDB/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22). It is a good start!
The project is still only a library, which means that no binary is provided yet. To get started, you can check the examples which are made to work with the data located in the `misc/` folder.
MeiliDB will become a binary in the near future so you will be able to use it as a database out-of-the-box. We should be able to query it using a [to-be-defined](https://github.com/Kerollmops/MeiliDB/issues/38) protocol. This is our current goal, [see the milestones](https://github.com/Kerollmops/MeiliDB/milestones). In the end, the binary will be a bunch of network protocols and wrappers around the library - which will also be published on [crates.io](https://crates.io). Both the binary and the library will follow the same update cycle.
<p align="center">
<img src="assets/logo.svg" alt="MeiliSearch" width="200" height="200" />
</p>
<h1 align="center">MeiliSearch</h1>
## Performances
<h4 align="center">
<a href="https://www.meilisearch.com">Website</a> |
<a href="https://blog.meilisearch.com">Blog</a> |
<a href="https://fr.linkedin.com/company/meilisearch">LinkedIn</a> |
<a href="https://twitter.com/meilisearch">Twitter</a> |
<a href="https://docs.meilisearch.com">Documentation</a> |
<a href="https://docs.meilisearch.com/resources/faq.html">FAQ</a>
</h4>
With a database composed of _100 353_ documents with _352_ attributes each, _90_ of which are indexed,
that is nearly _9 million_ fields indexed out of _35 million_ stored, we can handle more than _1.2k req/sec_ on an Intel i7-7700 (8) @ 4.2GHz.
<p align="center">
<a href="https://github.com/meilisearch/MeiliSearch/actions"><img src="https://github.com/meilisearch/MeiliSearch/workflows/Cargo%20test/badge.svg" alt="Build Status"></a>
<a href="https://deps.rs/repo/github/meilisearch/MeiliSearch"><img src="https://deps.rs/repo/github/meilisearch/MeiliSearch/status.svg" alt="Dependency status"></a>
<a href="https://github.com/meilisearch/MeiliSearch/blob/master/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
<a href="https://slack.meilisearch.com"><img src="https://img.shields.io/badge/slack-MeiliSearch-blue.svg?logo=slack" alt="Slack"></a>
<a href="https://github.com/meilisearch/MeiliSearch/discussions" alt="Discussions"><img src="https://img.shields.io/badge/github-discussions-red" /></a>
</p>
Requests are made using [wrk](https://github.com/wg/wrk) and scripted to generate real users queries.
<p align="center">⚡ Lightning Fast, Ultra Relevant, and Typo-Tolerant Search Engine 🔍</p>
```
Running 10s test @ http://localhost:2230
2 threads and 12 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 18.86ms 49.39ms 614.89ms 95.23%
Req/Sec 620.41 59.53 790.00 65.00%
12359 requests in 10.00s, 3.26MB read
Requests/sec: 1235.54
Transfer/sec: 334.22KB
```
**MeiliSearch** is a powerful, fast, open-source, easy to use and deploy search engine. Both searching and indexing are highly customizable. Features such as typo-tolerance, filters, and synonyms are provided out-of-the-box.
For more information about features, go to [our documentation](https://docs.meilisearch.com/).
### Notes
<p align="center">
<a href="https://crates.meilisearch.com"><img src="assets/crates-io-demo.gif" alt="crates.io demo gif" /></a>
</p>
The default Rust allocator has recently been [changed to use the system allocator](https://github.com/rust-lang/rust/pull/51241/).
We have seen much better performance when [using jemalloc as the global allocator](https://github.com/alexcrichton/jemallocator#documentation).
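This is why the examples in this repository opt into jemalloc explicitly; the two lines below are the exact incantation they use (it requires the `jemallocator` crate):
```rust
// from the examples in this repository: requires the `jemallocator` crate
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
```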
> MeiliSearch helps the Rust community find crates on [crates.meilisearch.com](https://crates.meilisearch.com)
## Usage and examples
## Features
* Search as-you-type experience (answers < 50 milliseconds)
* Full-text search
* Typo tolerant (understands typos and misspellings)
* Supports Kanji characters
* Supports synonyms
* Easy to install, deploy, and maintain
* Whole documents are returned
* Highly customizable
* RESTful API
Like most search engines, MeiliDB runs on an index.
To test the library, you can create an index by indexing a simple CSV file.
## Get started
### Deploy the Server
#### Run it using Docker
```bash
cargo run --release --example create-database -- test.mdb misc/kaggle.csv --schema schema-example.toml --stop-words misc/fr.stopwords.txt
docker run -p 7700:7700 -v $(pwd)/data.ms:/data.ms getmeili/meilisearch
```
Once the command is executed, the index should be in the `test.mdb` folder. You are now able to run the `query-database` example and play with MeiliDB.
#### Installing with Homebrew
```bash
cargo run --release --example query-database -- test.mdb -n 10 id title
brew update && brew install meilisearch
meilisearch
```
#### Installing with APT
```bash
echo "deb [trusted=yes] https://apt.fury.io/meilisearch/ /" > /etc/apt/sources.list.d/fury.list
apt update && apt install meilisearch-http
meilisearch
```
#### Download the binary
```bash
curl -L https://install.meilisearch.com | sh
./meilisearch
```
#### Compile and run it from sources
If you have the Rust toolchain already installed on your local system, clone the repository and change into its directory.
```bash
git clone https://github.com/meilisearch/MeiliSearch.git
cd MeiliSearch
```
In the cloned repository, compile and run MeiliSearch.
```bash
cargo run --release
```
### Create an Index and Upload Some Documents
Let's create an index! If you need a sample dataset, use [this movie database](https://www.notion.so/meilisearch/A-movies-dataset-to-test-Meili-1cbf7c9cfa4247249c40edfa22d7ca87#b5ae399b81834705ba5420ac70358a65). You can also find it in the `datasets/` directory.
```bash
curl -L 'https://bit.ly/2PAcw9l' -o movies.json
```
MeiliSearch can serve multiple indexes, with different kinds of documents.
You must create an index before sending documents to it.
```bash
curl -i -X POST 'http://127.0.0.1:7700/indexes' --data '{ "name": "Movies", "uid": "movies" }'
```
Now that the server knows about your brand new index, you're ready to send it some data.
```bash
curl -i -X POST 'http://127.0.0.1:7700/indexes/movies/documents' \
--header 'content-type: application/json' \
--data-binary @movies.json
```
### Search for Documents
#### In command line
The search engine is now aware of your documents and can serve them over its HTTP server.
The [`jq` command-line tool](https://stedolan.github.io/jq/) can greatly help you read the server responses.
```bash
curl 'http://127.0.0.1:7700/indexes/movies/search?q=botman+robin&limit=2' | jq
```
```json
{
"hits": [
{
"id": "415",
"title": "Batman & Robin",
"poster": "https://image.tmdb.org/t/p/w1280/79AYCcxw3kSKbhGpx1LiqaCAbwo.jpg",
"overview": "Along with crime-fighting partner Robin and new recruit Batgirl...",
"release_date": "1997-06-20",
},
{
"id": "411736",
"title": "Batman: Return of the Caped Crusaders",
"poster": "https://image.tmdb.org/t/p/w1280/GW3IyMW5Xgl0cgCN8wu96IlNpD.jpg",
"overview": "Adam West and Burt Ward returns to their iconic roles of Batman and Robin...",
"release_date": "2016-10-08",
}
],
"offset": 0,
"limit": 2,
"processingTimeMs": 1,
"query": "botman robin"
}
```
#### Use the Web Interface
We also deliver an **out-of-the-box web interface** in which you can test MeiliSearch interactively.
You can access the web interface in your web browser at the root of the server. The default URL is [http://127.0.0.1:7700](http://127.0.0.1:7700). All you need to do is open your web browser and enter MeiliSearch's address to visit it. This will lead you to a web page with a search bar that allows you to search in the selected index.
<p align="center">
<img src="assets/movies-web-demo.gif" alt="Web interface gif" />
</p>
### Documentation
Now that your MeiliSearch server is up and running, you can learn more about how to tune your search engine in [the documentation](https://docs.meilisearch.com).
## Contributing
Hey! We're glad you're thinking about contributing to MeiliSearch! If you think something is missing or could be improved, please open issues and pull requests. If you'd like to help this project grow, we'd love to have you! To start contributing, check the [issues tagged as "good-first-issue"](https://github.com/meilisearch/MeiliSearch/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22); it is a good start!
### Analytic Events
Once a day, events are sent to our Amplitude instance so we can know how many people are using MeiliSearch.<br/>
Only information about the platform on which the server runs is stored. No other information is sent.<br/>
If this doesn't suit you, you can disable these analytics by using the `MEILI_NO_ANALYTICS` env variable.
## Contact
Feel free to contact us with any questions you may have:
* At [bonjour@meilisearch.com](mailto:bonjour@meilisearch.com): English or French is welcome! 🇬🇧 🇫🇷
* Via the chat box available on every page of [our documentation](https://docs.meilisearch.com/) and on [our landing page](https://www.meilisearch.com/).
* 🆕 Join our [GitHub Discussions forum](https://github.com/meilisearch/MeiliSearch/discussions) (BETA hype!)
* Join our [Slack community](https://slack.meilisearch.com/).
* By opening an issue.
Any suggestion or feedback is highly appreciated. Thank you for your support!

BIN
assets/crates-io-demo.gif Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.2 MiB

17
assets/logo.svg Normal file
View File

@ -0,0 +1,17 @@
<svg width="360" height="360" viewBox="0 0 360 360" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="logo_main">
<rect id="Rectangle" x="107.333" y="0.150146" width="274.315" height="274.315" rx="98.8334" transform="rotate(23 107.333 0.150146)" fill="url(#paint0_linear)"/>
<path id="Rectangle_2" fill-rule="evenodd" clip-rule="evenodd" d="M61.3296 230.199C46.2224 194.608 38.6688 176.813 38.208 160.329C37.5286 136.025 47.0175 112.539 64.3891 95.5282C76.1718 83.9904 93.9669 76.4368 129.557 61.3296C165.147 46.2224 182.943 38.6688 199.427 38.208C223.731 37.5286 247.217 47.0175 264.228 64.3891C275.766 76.1718 283.319 93.9669 298.426 129.557C313.534 165.147 321.087 182.943 321.548 199.427C322.227 223.731 312.738 247.217 295.367 264.228C283.584 275.766 265.789 283.319 230.199 298.426C194.608 313.534 176.813 321.087 160.329 321.548C136.025 322.227 112.539 312.738 95.5282 295.367C83.9903 283.584 76.4368 265.789 61.3296 230.199Z" fill="url(#paint1_linear)"/>
<path id="m" fill-rule="evenodd" clip-rule="evenodd" d="M219.568 130.748C242.363 130.748 259.263 147.451 259.263 174.569V229.001H227.232V179.678C227.232 166.119 220.747 159.634 210.136 159.634C205.223 159.634 200.311 161.796 195.595 167.494C195.791 169.852 195.988 172.21 195.988 174.569V229.001H164.154V179.678C164.154 166.119 157.472 159.634 147.057 159.634C142.145 159.634 137.429 161.992 132.712 168.084V229.001H100.878V133.695H132.712V139.394C139.197 133.892 145.878 130.748 156.49 130.748C168.477 130.748 178.695 135.267 185.769 143.52C195.791 134.678 205.42 130.748 219.568 130.748Z" fill="white"/>
</g>
<defs>
<linearGradient id="paint0_linear" x1="-13.6248" y1="129.208" x2="244.49" y2="403.522" gradientUnits="userSpaceOnUse">
<stop stop-color="#E41359"/>
<stop offset="1" stop-color="#F23C79"/>
</linearGradient>
<linearGradient id="paint1_linear" x1="11.0088" y1="111.65" x2="111.65" y2="348.747" gradientUnits="userSpaceOnUse">
<stop stop-color="#24222F"/>
<stop offset="1" stop-color="#2B2937"/>
</linearGradient>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 2.0 KiB

BIN
assets/movies-web-demo.gif Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.3 MiB

View File

@ -1,15 +0,0 @@
#!/bin/bash
cd "$(dirname "$0")"/..
set -ex
export RUSTFLAGS="-D warnings"
cargo check --no-default-features
cargo check --bins --examples --tests
cargo test
if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
cargo check --no-default-features --features nightly
cargo test --features nightly
fi

View File

@ -0,0 +1 @@
_data in movies.csv is from https://www.themoviedb.org/_

19700
datasets/movies/movies.csv Normal file

File diff suppressed because it is too large

19654
datasets/movies/movies.json Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,10 @@
{
"searchableAttributes": ["title", "overview"],
"displayedAttributes": [
"id",
"title",
"overview",
"release_date",
"poster"
]
}

View File

@ -1,140 +0,0 @@
# A deep dive in MeiliDB
On the 9th of December 2018.
MeiliDB is a full-text search engine based on a finite state transducer named [fst](https://github.com/BurntSushi/fst) and a key-value store named [RocksDB](https://github.com/facebook/rocksdb). The goal of a search engine is to store data and to respond to queries as accurately and quickly as possible. To achieve this it must save the data as an [inverted index](https://en.wikipedia.org/wiki/Inverted_index).
<!-- MarkdownTOC autolink="true" -->
- [Where is the data stored?](#where-is-the-data-stored)
- [What does the key-value store contain?](#what-does-the-key-value-store-contain)
- [The blob type](#the-blob-type)
- [A finite state transducer](#a-finite-state-transducer)
- [Document indexes](#document-indexes)
- [Document ids](#document-ids)
- [The schema](#the-schema)
- [Document attributes](#document-attributes)
- [How is an update handled?](#how-is-an-update-handled)
- [The merge operation is CPU consuming](#the-merge-operation-is-cpu-consuming)
- [How is a request processed?](#how-is-a-request-processed)
- [Query lexemes](#query-lexemes)
- [Automatons and query index](#automatons-and-query-index)
- [Sort by criteria](#sort-by-criteria)
- [Retrieve original documents](#retrieve-original-documents)
<!-- /MarkdownTOC -->
## Where is the data stored?
MeiliDB is entirely backed by a key-value store, like any good database (e.g. Postgres, MySQL). This brings great flexibility in the way documents can be stored and updates handled over time.
[RocksDB brings some](https://rocksdb.org/blog/2015/02/27/write-batch-with-index.html) of the [A.C.I.D. properties](https://en.wikipedia.org/wiki/ACID_(computer_science)) to help us make sure the saved data is consistent. For example, we use SST files and the key-value store's ability to load them in one shot to manage updates.
Note that an SST file has the same restriction as the fst: its keys must be added in order at creation.
## What does the key-value store contain?
It contains the blob, the schema, and the stored document attributes.
### The blob type
[The Blob type](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/database/blob/mod.rs#L16-L19) is a data structure that indicates whether an update is a positive or a negative one. When the update is considered positive, the blob contains [an fst map and the associated document indexes](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/database/blob/positive/blob.rs#L15-L18). Otherwise it only contains [all the document ids](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/database/blob/negative/blob.rs#L12-L14) that must be considered removed.
The Blob type [is stored under the "*data-index*" entry](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/database/update/positive/update.rs#L497-L499) and marked as [a merge operation](https://github.com/facebook/rocksdb/wiki/Merge-Operator-Implementation) in the key-value store.
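To make the shape concrete, here is a minimal Rust sketch of the blob described above; the names and field types are simplified assumptions, not the exact source definitions:
```rust
/// Simplified stand-in for the real DocIndex: where a word matches.
struct DocIndex {
    document_id: u64, // which document the word appears in
    attribute: u16,   // which attribute of that document
    word_index: u32,  // where in that attribute
}

enum Blob {
    /// A positive update: the indexed words and their document indexes.
    Positive {
        words: Vec<(String, u64)>,   // stands in for the fst map
        indexes: Vec<Vec<DocIndex>>, // one entry per word number
    },
    /// A negative update: only the document ids considered removed.
    Negative { deleted_ids: Vec<u64> },
}

fn main() {
    let update = Blob::Negative { deleted_ids: vec![42] };
    if let Blob::Negative { deleted_ids } = update {
        assert_eq!(deleted_ids, vec![42]);
    }
}
```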
#### A finite state transducer
_...also abbreviated fst_
This is the first entry point of the engine; you can read more about how it works in the beautiful blog post by @BurntSushi, [Index 1,600,000,000 Keys with Automata and Rust](https://blog.burntsushi.net/transducers/).
In short, it is a powerful way to store all the words present in the indexed documents. You construct it by giving it all the words you want to index, each associated with a value that, for the moment, can only be a `u64`. When you want to search in it you can provide any automaton you want; in MeiliDB [a custom levenshtein automaton](https://github.com/tantivy-search/levenshtein-automata/) is used.
Note that the number under each word is auto-incremented: each new word gets a number greater than the previous one.
Another powerful feature of `fst` is that it can nearly avoid using RAM and can be streamed to disk, for example. The catch is that the keys must always be added in lexicographic order, so you must sort them beforehand; for the moment MeiliDB uses a [BTreeMap](https://github.com/Kerollmops/raptor-rs/blob/8abdb0a228e2808fe1814a6a0641a4b72d158579/src/metadata/doc_indexes.rs#L107-L112).
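As a tiny concrete illustration, here is how such a map could be built with the fst 0.3 crate pinned in the Cargo.toml above (the words and numbers are made up):
```rust
use fst::Map;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // keys must be provided in lexicographic order; the values are the
    // auto-incremented word numbers described above
    let map = Map::from_iter(vec![
        ("batman", 0u64),
        ("robin", 1),
    ])?;
    assert_eq!(map.get("robin"), Some(1));
    assert_eq!(map.get("joker"), None);
    Ok(())
}
```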
#### Document indexes
As specified above, the `fst` can only store a number corresponding to a word, a `u64`, but the goal of the search engine is to retrieve a match in a document when a query is made. You want it to return some sort of position of the match in an attribute of a document, information about where the given word matches.
To make that possible, a custom data structure was developed. The document indexes are composed of two arrays: the ranges array and all the docindexes corresponding to a given range, where each range identifies a word number. The [DocIndexes](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/data/doc_indexes.rs#L23) type is designed to be streamed when constructed, consuming a minimal amount of RAM like the fst. Another advantage is that the slices are accessible in `O(1)` when you know the number associated with the word.
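A rough sketch of that two-array layout, simplified down to plain document ids (the real DocIndex carries more information):
```rust
/// Simplified two-array layout: `ranges[word]` locates the slice of
/// `indexes` holding every document id for that word number.
struct DocIndexes {
    ranges: Vec<(usize, usize)>, // one (start, end) pair per word number
    indexes: Vec<u64>,           // all document ids, grouped by word
}

impl DocIndexes {
    /// O(1) access to the matches of a word once its number is known.
    fn get(&self, word: usize) -> &[u64] {
        let (start, end) = self.ranges[word];
        &self.indexes[start..end]
    }
}

fn main() {
    let di = DocIndexes {
        ranges: vec![(0, 2), (2, 3)],
        indexes: vec![4, 8, 15],
    };
    // word number 0 matches documents 4 and 8
    assert_eq!(di.get(0), &[4u64, 8][..]);
}
```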
#### Document ids
This is a simple ordered list of all the document ids that must be considered deleted. It is used with [the sdset library](https://docs.rs/sdset/0.3.0/sdset/duo/struct.DifferenceByKey.html), the docindexes, and the `DifferenceByKey` operation builder when merging blobs.
When a blob represents a negative update, it only contains this simple slice of deleted document ids.
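For example, dropping the deleted ids out of a sorted set could look like this sketch, assuming the sdset 0.3 `DifferenceByKey` API linked above:
```rust
use sdset::duo::DifferenceByKey;
use sdset::{Set, SetOperation};

fn main() {
    // (document id, word index) pairs, already sorted: a stand-in for docindexes
    let indexes = Set::new_unchecked(&[(1u64, 0u32), (2, 0), (3, 1)]);
    // the ordered deleted ids of a negative blob
    let deleted = Set::new_unchecked(&[2u64]);
    // keep every index whose document id is not in the deleted set
    let remaining = DifferenceByKey::new(indexes, deleted, |i| i.0, |d| *d).into_set_buf();
    assert_eq!(&remaining[..], &[(1, 0), (3, 1)][..]);
}
```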
### The schema
The schema is a data structure that represents which document attributes should be stored and which should be indexed. It is stored under the "_data-schema_" entry and given to MeiliDB only at creation time.
Each document attribute is associated with a unique 32-bit number named `SchemaAttr`.
In the future this schema type could be given along with updates and could differ from the original; the database would be able to handle the new document structure and reindex it.
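A simplified, hypothetical sketch of what such a schema holds, where the attribute number is simply the declaration position:
```rust
/// Hypothetical, simplified schema shapes; not the exact source types.
struct SchemaAttr(u32);

struct AttrProps {
    stored: bool,
    indexed: bool,
}

struct Schema {
    identifier: String,                   // e.g. "id"
    attributes: Vec<(String, AttrProps)>, // declaration order matters
}

impl Schema {
    fn attribute(&self, name: &str) -> Option<SchemaAttr> {
        self.attributes
            .iter()
            .position(|(n, _)| n.as_str() == name)
            .map(|i| SchemaAttr(i as u32))
    }
}

fn main() {
    let schema = Schema {
        identifier: "id".into(),
        attributes: vec![
            ("id".into(), AttrProps { stored: true, indexed: false }),
            ("title".into(), AttrProps { stored: true, indexed: true }),
        ],
    };
    assert_eq!(schema.attribute("title").map(|a| a.0), Some(1));
}
```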
### Document attributes
When the engine handles a query, what the requester wants is a document, not only the [match](https://github.com/Kerollmops/MeiliDB/blob/fc2cdf92596fc002ce278e3aa8718640ac44724d/src/lib.rs#L51-L79) associated with it; the fields of the original document must be returned too.
So MeiliDB again uses the power of the underlying key-value store and saves the document attributes marked as _STORE_. The key is prefixed by "_doc_" followed by the 64-bit document id in bytes and the schema attribute number in bytes corresponding to the stored document attribute.
When a document field is saved in the key-value store, its value is binary encoded using the [bincode](https://docs.rs/bincode/) library, so a document must be serializable using serde.
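For illustration, building such a key could look like the following sketch; the exact prefix bytes and endianness are assumptions:
```rust
// Sketch of the "doc"-prefixed key described above: the prefix, then the
// 64-bit document id, then the 32-bit schema attribute number.
fn document_attribute_key(document_id: u64, attr: u32) -> Vec<u8> {
    let mut key = Vec::with_capacity(3 + 8 + 4);
    key.extend_from_slice(b"doc");                     // assumed prefix
    key.extend_from_slice(&document_id.to_be_bytes()); // 8 bytes, big-endian
    key.extend_from_slice(&attr.to_be_bytes());        // 4 bytes, big-endian
    key
}

fn main() {
    let key = document_attribute_key(42, 1);
    assert_eq!(key.len(), 15);
}
```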
## How is an update handled?
First of all, an update in MeiliDB is nothing more than [a RocksDB SST file](https://github.com/facebook/rocksdb/wiki/Creating-and-Ingesting-SST-files). It contains the blob and all the document attributes binary encoded as described above. Note that the blob is stored under the "_data-index_" key marked as [a merge operation](https://github.com/facebook/rocksdb/wiki/Merge-Operator-Implementation).
### The merge operation is CPU consuming
When [the database ingests an update](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/database/mod.rs#L108-L145) it gives the SST file to the underlying RocksDB. Once ingested, a "_data-index_" entry is available; we can request it, but the key-value store will call a function first: a merge operation is performed.
This merge operation is done on multiple blobs, as you have understood, and computes a [PositiveBlob](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/database/blob/positive/blob.rs#L15). This type contains the fst and document indexes structures allowing us to search for documents. These two data structures can be considered the inverted index.
The computation time of this merge matters: RocksDB doesn't keep the previous merged result, and it will call our merge operation each time until it decides to do a compaction. So [we must force this compaction earlier](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/database/mod.rs#L129-L131) when we receive an update to reduce this cost.
This way, when we request the "_data-index_" value, it gives us the previously merged positive blob without any other merge overhead.
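Here is a conceptual sketch of why that repeated merge is costly; a blob is reduced to a bare list of document ids and the real blob semantics are elided:
```rust
// RocksDB calls the merge operator with the existing value and every
// operand accumulated since the last compaction, so the work grows until
// a compaction persists the merged result.
fn merge_blobs(existing: Option<Vec<u64>>, operands: &[Vec<u64>]) -> Vec<u64> {
    let mut merged = existing.unwrap_or_default();
    for operand in operands {
        // a real merge unions positive blobs and subtracts negative ones
        merged.extend(operand.iter().copied());
    }
    merged.sort_unstable();
    merged.dedup();
    merged
}

fn main() {
    let merged = merge_blobs(Some(vec![1, 3]), &[vec![2], vec![3, 4]]);
    assert_eq!(merged, vec![1, 2, 3, 4]);
}
```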
## How is a request processed?
Now that we have our "_data-index_" we are able to return results based on a query. In the MeiliDB universe a query is a string.
### Query lexemes
The first step, to be able to call the underlying structures, is to split the query into words; for that we use a [custom tokenizer](https://github.com/Kerollmops/MeiliDB/blob/fc2cdf92596fc002ce278e3aa8718640ac44724d/src/tokenizer/mod.rs) that is not finished for the moment ([there is an open issue](https://github.com/Kerollmops/MeiliDB/issues/3)). Note that a tokenizer is specialized for a human language; this is the hard part.
### Automatons and query index
To query the fst we need an automaton; in MeiliDB we use a [levenshtein automaton](https://en.wikipedia.org/wiki/Levenshtein_automaton), which is constructed from a string and a maximum distance. Following [Algolia's blog post](https://blog.algolia.com/inside-the-algolia-engine-part-3-query-processing/#algolia%e2%80%99s-way-of-searching-for-alternatives) we [created the DFAs](https://github.com/Kerollmops/MeiliDB/blob/fc2cdf92596fc002ce278e3aa8718640ac44724d/src/automaton.rs#L62-L75) with different settings.
Thanks to the power of the fst library [it is possible to union multiple automatons](https://docs.rs/fst/0.3.2/fst/map/struct.OpBuilder.html#method.union) on the same fst map, which lets us know [which automaton returned a word according to its index](https://github.com/Kerollmops/MeiliDB/blob/fc2cdf92596fc002ce278e3aa8718640ac44724d/src/metadata/ops.rs#L111). The `Stream` is able to return all the numbers associated with the words. We use these numbers to find the whole list of `DocIndexes` associated and perform the union set operation.
With all this information it is possible [to reconstruct the list of all the DocIndexes associated](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/rank/query_builder.rs#L62-L99) with the queried words.
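A hedged sketch of querying the map through one such DFA, assuming the levenshtein_automata 0.1 crate with its `fst_automaton` feature enabled (as pinned in the Cargo.toml of the time):
```rust
use fst::{IntoStreamer, Map, Streamer};
use levenshtein_automata::LevenshteinAutomatonBuilder;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let map = Map::from_iter(vec![("batman", 0u64), ("robin", 1)])?;
    // a DFA accepting every word at most one typo away from "botman"
    let dfa = LevenshteinAutomatonBuilder::new(1, true).build_dfa("botman");
    let mut stream = map.search(dfa).into_stream();
    while let Some((word, number)) = stream.next() {
        // prints "batman -> 0": the word number used to find its DocIndexes
        println!("{} -> {}", String::from_utf8_lossy(word), number);
    }
    Ok(())
}
```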
### Sort by criteria
Now that we are able to get a big list of [DocIndexes](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/lib.rs#L21-L36), it is not enough to sort them by criteria; we need more information, like the levenshtein distance or whether a query word matches exactly the word stored in the fst. So [we stuff it a little bit](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/rank/query_builder.rs#L86-L93), and aggregate all these [Matches](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/lib.rs#L47-L74) for each document. This way it is easy to sort a simple vector of documents using a bunch of functions.
With this big list of documents and associated matches, [we are able to sort only the part of the slice that we want](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/rank/query_builder.rs#L108-L119) using bucket sorting. [Each criterion](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/rank/criterion/mod.rs#L75-L87) is evaluated on each subslice without copying, thanks to [GroupByMut](https://github.com/Kerollmops/group-by/blob/cab857bae01463dbd0edb99b0e0d7f3624e6c6f5/src/lib.rs#L180-L185) which, I hope, [will soon be merged](https://github.com/rust-lang/rfcs/pull/2477).
Note that it is possible to customize the criteria by using the `QueryBuilder::with_criteria` constructor; this way you can implement some custom ranking based on the document attributes using the appropriate structure and the `retrieve_document` method.
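A minimal sketch of that bucket sort, with hypothetical names and plain function pointers standing in for the criteria:
```rust
use std::cmp::Ordering;

/// Sort by the first criterion, then refine each tied bucket with the
/// remaining criteria, operating on sub-slices without any copy.
fn bucket_sort<D>(docs: &mut [D], criteria: &[fn(&D, &D) -> Ordering]) {
    let (criterion, rest) = match criteria.split_first() {
        Some(split) => split,
        None => return,
    };
    docs.sort_by(|a, b| criterion(a, b));
    let mut start = 0;
    while start < docs.len() {
        // find the end of the bucket of documents tied under `criterion`
        let mut end = start + 1;
        while end < docs.len() && criterion(&docs[start], &docs[end]) == Ordering::Equal {
            end += 1;
        }
        bucket_sort(&mut docs[start..end], rest);
        start = end;
    }
}

fn main() {
    // (typos, exact matches): fewer typos first, then more exact matches
    let mut docs = vec![(1, 2), (0, 1), (1, 5), (0, 3)];
    bucket_sort(&mut docs, &[|a, b| a.0.cmp(&b.0), |a, b| b.1.cmp(&a.1)]);
    assert_eq!(docs, vec![(0, 3), (0, 1), (1, 5), (1, 2)]);
}
```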
### Retrieve original documents
The [DatabaseView](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/database/database_view.rs#L18-L24) structure that you must have created to be able to query the database has [two functions](https://github.com/Kerollmops/MeiliDB/blob/550dc1e99224e386516877450320f694947332d4/src/database/database_view.rs#L60-L76) that allow you to retrieve a full (or partial) document according to the schema you specified at creation time (i.e. the _STORED_ attributes).
As you can see, these functions force the created type `T` to implement [the serde Deserialize trait](https://docs.rs/serde/1.0.81/serde/trait.Deserialize.html); MeiliDB uses the `bincode::deserialize` function on each attribute to construct your type and return it to you.
At this point, MeiliDB's work is over 🎉

129
download-latest.sh Normal file
View File

@ -0,0 +1,129 @@
#!/bin/sh
# COLORS
RED='\033[31m'
GREEN='\033[32m'
DEFAULT='\033[0m'
# GLOBALS
GREP_SEMVER_REGEXP='\"v\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\"' # i.e. "v[number].[number].[number]"
BINARY_NAME='meilisearch'
# semverParseInto and semverLT from https://github.com/cloudflare/semver_bash/blob/master/semver.sh
# usage: semverParseInto version major minor patch special
# version: the string version
# major, minor, patch, special: will be assigned by the function
semverParseInto() {
local RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)'
#MAJOR
eval $2=`echo $1 | sed -e "s#$RE#\1#"`
#MINOR
eval $3=`echo $1 | sed -e "s#$RE#\2#"`
#PATCH
eval $4=`echo $1 | sed -e "s#$RE#\3#"`
#SPECIAL
eval $5=`echo $1 | sed -e "s#$RE#\4#"`
}
# usage: semverLT version1 version2
semverLT() {
local MAJOR_A=0
local MINOR_A=0
local PATCH_A=0
local SPECIAL_A=0
local MAJOR_B=0
local MINOR_B=0
local PATCH_B=0
local SPECIAL_B=0
semverParseInto $1 MAJOR_A MINOR_A PATCH_A SPECIAL_A
semverParseInto $2 MAJOR_B MINOR_B PATCH_B SPECIAL_B
if [ $MAJOR_A -lt $MAJOR_B ]; then
return 0
fi
if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -lt $MINOR_B ]; then
return 0
fi
if [ $MAJOR_A -le $MAJOR_B ] && [ $MINOR_A -le $MINOR_B ] && [ $PATCH_A -lt $PATCH_B ]; then
return 0
fi
if [ "_$SPECIAL_A" == "_" ] && [ "_$SPECIAL_B" == "_" ] ; then
return 1
fi
if [ "_$SPECIAL_A" == "_" ] && [ "_$SPECIAL_B" != "_" ] ; then
return 1
fi
if [ "_$SPECIAL_A" != "_" ] && [ "_$SPECIAL_B" == "_" ] ; then
return 0
fi
if [ "_$SPECIAL_A" < "_$SPECIAL_B" ]; then
return 0
fi
return 1
}
success_usage() {
printf "$GREEN%s\n$DEFAULT" "MeiliSearch binary successfully downloaded as '$BINARY_NAME' file."
echo ''
echo 'Run it:'
echo ' $ ./meilisearch'
echo 'Usage:'
echo ' $ ./meilisearch --help'
}
failure_usage() {
printf "$RED%s\n$DEFAULT" 'ERROR: MeiliSearch binary is not available for your OS distribution yet.'
echo ''
echo 'However, you can easily compile the binary from the source files.'
echo 'Follow the steps on the docs: https://docs.meilisearch.com/advanced_guides/binary.html#how-to-compile-meilisearch'
}
# OS DETECTION
echo 'Detecting OS distribution...'
os_name=$(uname -s)
if [ "$os_name" != "Darwin" ]; then
os_name=$(cat /etc/os-release | grep '^ID=' | tr -d '"' | cut -d '=' -f 2)
fi
echo "OS distribution detected: $os_name"
case "$os_name" in
'Darwin')
os='macos'
;;
'ubuntu' | 'debian')
os='linux'
;;
*)
failure_usage
exit 1
esac
# GET LATEST VERSION
tags=$(curl -s 'https://api.github.com/repos/meilisearch/MeiliSearch/tags' \
| grep "$GREP_SEMVER_REGEXP" \
| grep 'name' \
| tr -d '"' | tr -d ',' | cut -d 'v' -f 2)
latest=""
for tag in $tags; do
if [ "$latest" = "" ]; then
latest="$tag"
else
semverLT $tag $latest
if [ $? -eq 1 ]; then
latest="$tag"
fi
fi
done
# DOWNLOAD THE LATEST
echo "Downloading MeiliSearch binary v$latest for $os..."
release_file="meilisearch-$os-amd64"
link="https://github.com/meilisearch/MeiliSearch/releases/download/v$latest/$release_file"
curl -OL "$link"
mv "$release_file" "$BINARY_NAME"
chmod 744 "$BINARY_NAME"
success_usage

View File

@ -1,138 +0,0 @@
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
use std::io::{self, BufRead, BufReader};
use std::path::{Path, PathBuf};
use std::error::Error;
use std::borrow::Cow;
use std::fs::File;
use hashbrown::{HashMap, HashSet};
use serde_derive::{Serialize, Deserialize};
use structopt::StructOpt;
use meilidb::database::{Database, Schema, UpdateBuilder};
use meilidb::tokenizer::DefaultBuilder;
#[derive(Debug, StructOpt)]
pub struct Opt {
/// The destination where the database must be created.
#[structopt(parse(from_os_str))]
pub database_path: PathBuf,
/// The csv file to index.
#[structopt(parse(from_os_str))]
pub csv_data_path: PathBuf,
/// The path to the schema.
#[structopt(long = "schema", parse(from_os_str))]
pub schema_path: PathBuf,
/// The path to the list of stop words (one per line).
#[structopt(long = "stop-words", parse(from_os_str))]
pub stop_words_path: Option<PathBuf>,
#[structopt(long = "update-group-size")]
pub update_group_size: Option<usize>,
}
#[derive(Serialize, Deserialize)]
struct Document<'a> (
#[serde(borrow)]
HashMap<Cow<'a, str>, Cow<'a, str>>
);
fn index(
schema: Schema,
database_path: &Path,
csv_data_path: &Path,
update_group_size: Option<usize>,
stop_words: &HashSet<String>,
) -> Result<Database, Box<Error>>
{
let database = Database::create(database_path, &schema)?;
let mut rdr = csv::Reader::from_path(csv_data_path)?;
let mut raw_record = csv::StringRecord::new();
let headers = rdr.headers()?.clone();
let mut i = 0;
let mut end_of_file = false;
while !end_of_file {
let tokenizer_builder = DefaultBuilder::new();
let update_path = tempfile::NamedTempFile::new()?;
let mut update = UpdateBuilder::new(update_path.path().to_path_buf(), schema.clone());
loop {
end_of_file = !rdr.read_record(&mut raw_record)?;
if end_of_file { break }
let document: Document = match raw_record.deserialize(Some(&headers)) {
Ok(document) => document,
Err(e) => {
eprintln!("{:?}", e);
continue;
}
};
update.update_document(&document, &tokenizer_builder, &stop_words)?;
print!("\rindexing document {}", i);
i += 1;
if let Some(group_size) = update_group_size {
if i % group_size == 0 { break }
}
}
println!();
println!("building update...");
let update = update.build()?;
println!("ingesting update...");
database.ingest_update_file(update)?;
}
Ok(database)
}
fn retrieve_stop_words(path: &Path) -> io::Result<HashSet<String>> {
let f = File::open(path)?;
let reader = BufReader::new(f);
let mut words = HashSet::new();
for line in reader.lines() {
let line = line?;
let word = line.trim().to_string();
words.insert(word);
}
Ok(words)
}
fn main() -> Result<(), Box<Error>> {
let _ = env_logger::init();
let opt = Opt::from_args();
let schema = {
let file = File::open(&opt.schema_path)?;
Schema::from_toml(file)?
};
let stop_words = match opt.stop_words_path {
Some(ref path) => retrieve_stop_words(path)?,
None => HashSet::new(),
};
let (elapsed, result) = elapsed::measure_time(|| {
index(schema, &opt.database_path, &opt.csv_data_path, opt.update_group_size, &stop_words)
});
if let Err(e) = result {
return Err(e.into())
}
println!("database created in {} at: {:?}", elapsed, opt.database_path);
Ok(())
}

View File

@ -1,170 +0,0 @@
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
use std::collections::btree_map::{BTreeMap, Entry};
use std::iter::FromIterator;
use std::io::{self, Write};
use std::path::PathBuf;
use std::error::Error;
use hashbrown::{HashMap, HashSet};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
use structopt::StructOpt;
use meilidb::database::schema::SchemaAttr;
use meilidb::database::Database;
use meilidb::Match;
#[derive(Debug, StructOpt)]
pub struct Opt {
/// The destination where the database must be created
#[structopt(parse(from_os_str))]
pub database_path: PathBuf,
/// Fields that must be displayed.
pub displayed_fields: Vec<String>,
/// The number of returned results
#[structopt(short = "n", long = "number-results", default_value = "10")]
pub number_results: usize,
}
type Document = HashMap<String, String>;
fn display_highlights(text: &str, ranges: &[usize]) -> io::Result<()> {
let mut stdout = StandardStream::stdout(ColorChoice::Always);
let mut highlighted = false;
for range in ranges.windows(2) {
let [start, end] = match range { [start, end] => [*start, *end], _ => unreachable!() };
if highlighted {
stdout.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
}
write!(&mut stdout, "{}", &text[start..end])?;
stdout.reset()?;
highlighted = !highlighted;
}
Ok(())
}
fn char_to_byte_range(index: usize, length: usize, text: &str) -> (usize, usize) {
let mut byte_index = 0;
let mut byte_length = 0;
for (n, (i, c)) in text.char_indices().enumerate() {
if n == index {
byte_index = i;
}
if n + 1 == index + length {
byte_length = i - byte_index + c.len_utf8();
break;
}
}
(byte_index, byte_length)
}
fn create_highlight_areas(text: &str, matches: &[Match], attribute: SchemaAttr) -> Vec<usize> {
let mut byte_indexes = BTreeMap::new();
for match_ in matches {
let match_attribute = match_.attribute.attribute();
if SchemaAttr::new(match_attribute) == attribute {
let word_area = match_.word_area;
let char_index = word_area.char_index() as usize;
let char_length = word_area.length() as usize;
let (byte_index, byte_length) = char_to_byte_range(char_index, char_length, text);
match byte_indexes.entry(byte_index) {
Entry::Vacant(entry) => { entry.insert(byte_length); },
Entry::Occupied(mut entry) => {
if *entry.get() < byte_length {
entry.insert(byte_length);
}
},
}
}
}
let mut title_areas = Vec::new();
title_areas.push(0);
for (byte_index, length) in byte_indexes {
title_areas.push(byte_index);
title_areas.push(byte_index + length);
}
title_areas.push(text.len());
title_areas.sort_unstable();
title_areas
}
fn main() -> Result<(), Box<Error>> {
let _ = env_logger::init();
let opt = Opt::from_args();
let (elapsed, result) = elapsed::measure_time(|| Database::open(&opt.database_path));
let database = result?;
println!("database prepared for you in {}", elapsed);
let mut buffer = String::new();
let input = io::stdin();
loop {
print!("Searching for: ");
io::stdout().flush()?;
if input.read_line(&mut buffer)? == 0 { break }
let query = buffer.trim_end_matches('\n');
let view = database.view();
let schema = view.schema();
let (elapsed, documents) = elapsed::measure_time(|| {
let builder = view.query_builder().unwrap();
builder.query(query, 0..opt.number_results)
});
let number_of_documents = documents.len();
for doc in documents {
match view.document_by_id::<Document>(doc.id) {
Ok(document) => {
for name in &opt.displayed_fields {
let attr = match schema.attribute(name) {
Some(attr) => attr,
None => continue,
};
let text = match document.get(name) {
Some(text) => text,
None => continue,
};
print!("{}: ", name);
let areas = create_highlight_areas(&text, &doc.matches, attr);
display_highlights(&text, &areas)?;
println!();
}
},
Err(e) => eprintln!("{}", e),
}
let mut matching_attributes = HashSet::new();
for _match in doc.matches {
let attr = SchemaAttr::new(_match.attribute.attribute());
let name = schema.attribute_name(attr);
matching_attributes.insert(name);
}
let matching_attributes = Vec::from_iter(matching_attributes);
println!("matching in: {:?}", matching_attributes);
println!();
}
eprintln!("===== Found {} results in {} =====", number_of_documents, elapsed);
buffer.clear();
}
Ok(())
}

View File

@ -1,19 +0,0 @@
# This schema has been generated ...
# The order in which the attributes are declared is important,
# it specifies the attribute xxx...
identifier = "id"
[attributes.id]
stored = true
[attributes.title]
stored = true
indexed = true
[attributes.description]
stored = true
indexed = true
[attributes.image]
stored = true

View File

@ -0,0 +1,53 @@
[package]
name = "meilisearch-core"
version = "0.10.1"
license = "MIT"
authors = ["Kerollmops <clement@meilisearch.com>"]
edition = "2018"
[dependencies]
arc-swap = "0.4.5"
bincode = "1.2.1"
byteorder = "1.3.4"
chrono = { version = "0.4.11", features = ["serde"] }
compact_arena = "0.4.0"
crossbeam-channel = "0.4.2"
deunicode = "1.1.0"
env_logger = "0.7.1"
fst = { version = "0.3.5", default-features = false }
hashbrown = { version = "0.7.1", features = ["serde"] }
heed = "0.7.0"
indexmap = { version = "1.3.2", features = ["serde-1"] }
intervaltree = "0.2.5"
itertools = "0.9.0"
levenshtein_automata = { version = "0.1.1", features = ["fst_automaton"] }
log = "0.4.8"
meilisearch-schema = { path = "../meilisearch-schema", version = "0.10.1" }
meilisearch-tokenizer = { path = "../meilisearch-tokenizer", version = "0.10.1" }
meilisearch-types = { path = "../meilisearch-types", version = "0.10.1" }
once_cell = "1.3.1"
ordered-float = { version = "1.0.2", features = ["serde"] }
pest = { git = "https://github.com/MarinPostma/pest.git", tag = "meilisearch-patch1" }
pest_derive = "2.0"
regex = "1.3.6"
sdset = "0.4.0"
serde = { version = "1.0.105", features = ["derive"] }
serde_json = "1.0.50"
siphasher = "0.3.2"
slice-group-by = "0.2.6"
unicase = "2.6.0"
zerocopy = "0.3.0"
[dev-dependencies]
assert_matches = "1.3.0"
criterion = "0.3.1"
csv = "1.1.3"
jemallocator = "0.3.2"
rustyline = { version = "6.0.0", default-features = false }
structopt = "0.3.12"
tempfile = "3.1.0"
termcolor = "1.1.0"
[[bench]]
name = "search_benchmark"
harness = false

View File

@ -0,0 +1,104 @@
#[cfg(test)]
#[macro_use]
extern crate assert_matches;
use std::sync::mpsc;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::iter;
use meilisearch_core::Database;
use meilisearch_core::{ProcessedUpdateResult, UpdateStatus};
use meilisearch_core::settings::{Settings, SettingsUpdate};
use meilisearch_schema::Schema;
use serde_json::Value;
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId};
fn prepare_database(path: &Path) -> Database {
let database = Database::open_or_create(path).unwrap();
let db = &database;
let (sender, receiver) = mpsc::sync_channel(100);
let update_fn = move |_name: &str, update: ProcessedUpdateResult| {
sender.send(update.update_id).unwrap()
};
let index = database.create_index("bench").unwrap();
database.set_update_callback(Box::new(update_fn));
let mut writer = db.main_write_txn().unwrap();
index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap();
writer.commit().unwrap();
let settings_update: SettingsUpdate = {
let path = concat!(env!("CARGO_MANIFEST_DIR"), "/../datasets/movies/settings.json");
let file = File::open(path).unwrap();
let reader = BufReader::new(file);
let settings: Settings = serde_json::from_reader(reader).unwrap();
settings.into_update().unwrap()
};
let mut update_writer = db.update_write_txn().unwrap();
let _update_id = index.settings_update(&mut update_writer, settings_update).unwrap();
update_writer.commit().unwrap();
let mut additions = index.documents_addition();
let json: Value = {
let path = concat!(env!("CARGO_MANIFEST_DIR"), "/../datasets/movies/movies.json");
let movies_file = File::open(path).expect("find movies");
serde_json::from_reader(movies_file).unwrap()
};
let documents = json.as_array().unwrap();
for document in documents {
additions.update_document(document);
}
let mut update_writer = db.update_write_txn().unwrap();
let update_id = additions.finalize(&mut update_writer).unwrap();
update_writer.commit().unwrap();
// block until the transaction is processed
let _ = receiver.into_iter().find(|id| *id == update_id);
let update_reader = db.update_read_txn().unwrap();
let result = index.update_status(&update_reader, update_id).unwrap();
assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none());
database
}
pub fn criterion_benchmark(c: &mut Criterion) {
let dir = tempfile::tempdir().unwrap();
let database = prepare_database(dir.path());
let reader = database.main_read_txn().unwrap();
let index = database.open_index("bench").unwrap();
let mut count = 0;
let query = "I love paris ";
let iter = iter::from_fn(|| {
count += 1;
query.get(0..count)
});
let mut group = c.benchmark_group("searching in movies (19654 docs)");
group.sample_size(10);
for query in iter {
let bench_name = BenchmarkId::from_parameter(format!("{:?}", query));
group.bench_with_input(bench_name, &query, |b, query| b.iter(|| {
let builder = index.query_builder();
builder.query(&reader, query, 0..20).unwrap();
}));
}
group.finish();
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

View File

@ -0,0 +1,473 @@
use std::collections::HashSet;
use std::collections::btree_map::{BTreeMap, Entry};
use std::error::Error;
use std::io::{Read, Write};
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};
use std::{fs, io, sync::mpsc};
use rustyline::{Config, Editor};
use serde::{Deserialize, Serialize};
use structopt::StructOpt;
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
use meilisearch_core::{Database, Highlight, ProcessedUpdateResult};
use meilisearch_core::settings::Settings;
use meilisearch_schema::FieldId;
// #[cfg(target_os = "linux")]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
#[derive(Debug, StructOpt)]
struct IndexCommand {
/// The destination where the database must be created.
#[structopt(parse(from_os_str))]
database_path: PathBuf,
#[structopt(long, default_value = "default")]
index_uid: String,
/// The csv file path to index; you can also use `-` to read from the standard input.
#[structopt(parse(from_os_str))]
csv_data_path: PathBuf,
/// The path to the settings.
#[structopt(long, parse(from_os_str))]
settings: PathBuf,
#[structopt(long)]
update_group_size: Option<usize>,
#[structopt(long, parse(from_os_str))]
compact_to_path: Option<PathBuf>,
}
#[derive(Debug, StructOpt)]
struct SearchCommand {
/// The path of the database to work with.
#[structopt(parse(from_os_str))]
database_path: PathBuf,
#[structopt(long, default_value = "default")]
index_uid: String,
/// Timeout after which the search will return results.
#[structopt(long)]
fetch_timeout_ms: Option<u64>,
/// The number of returned results
#[structopt(short, long, default_value = "10")]
number_results: usize,
/// The number of characters before and after the first match
#[structopt(short = "C", long, default_value = "35")]
char_context: usize,
/// A filter string that can be `!adult` or `adult` to
/// filter documents on this specified field
#[structopt(short, long)]
filter: Option<String>,
/// Fields that must be displayed.
displayed_fields: Vec<String>,
}
#[derive(Debug, StructOpt)]
struct ShowUpdatesCommand {
/// The path of the database to work with.
#[structopt(parse(from_os_str))]
database_path: PathBuf,
#[structopt(long, default_value = "default")]
index_uid: String,
}
#[derive(Debug, StructOpt)]
enum Command {
Index(IndexCommand),
Search(SearchCommand),
ShowUpdates(ShowUpdatesCommand),
}
impl Command {
fn path(&self) -> &Path {
match self {
Command::Index(command) => &command.database_path,
Command::Search(command) => &command.database_path,
Command::ShowUpdates(command) => &command.database_path,
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct Document(indexmap::IndexMap<String, String>);
fn index_command(command: IndexCommand, database: Database) -> Result<(), Box<dyn Error>> {
let start = Instant::now();
let (sender, receiver) = mpsc::sync_channel(100);
let update_fn =
move |_name: &str, update: ProcessedUpdateResult| sender.send(update.update_id).unwrap();
let index = match database.open_index(&command.index_uid) {
Some(index) => index,
None => database.create_index(&command.index_uid).unwrap(),
};
database.set_update_callback(Box::new(update_fn));
let db = &database;
let settings = {
let string = fs::read_to_string(&command.settings)?;
let settings: Settings = serde_json::from_str(&string).unwrap();
settings.into_update().unwrap()
};
let mut update_writer = db.update_write_txn().unwrap();
index.settings_update(&mut update_writer, settings)?;
update_writer.commit().unwrap();
let mut rdr = if command.csv_data_path.as_os_str() == "-" {
csv::Reader::from_reader(Box::new(io::stdin()) as Box<dyn Read>)
} else {
let file = std::fs::File::open(command.csv_data_path)?;
csv::Reader::from_reader(Box::new(file) as Box<dyn Read>)
};
let mut raw_record = csv::StringRecord::new();
let headers = rdr.headers()?.clone();
let mut max_update_id = 0;
let mut i = 0;
let mut end_of_file = false;
while !end_of_file {
let mut additions = index.documents_addition();
loop {
end_of_file = !rdr.read_record(&mut raw_record)?;
if end_of_file {
break;
}
let document: Document = match raw_record.deserialize(Some(&headers)) {
Ok(document) => document,
Err(e) => {
eprintln!("{:?}", e);
continue;
}
};
additions.update_document(document);
print!("\rindexing document {}", i);
i += 1;
if let Some(group_size) = command.update_group_size {
if i % group_size == 0 {
break;
}
}
}
println!();
let mut update_writer = db.update_write_txn().unwrap();
println!("committing update...");
let update_id = additions.finalize(&mut update_writer)?;
update_writer.commit().unwrap();
max_update_id = max_update_id.max(update_id);
println!("committed update {}", update_id);
}
println!("Waiting for update {}", max_update_id);
for id in receiver {
if id == max_update_id {
break;
}
}
println!(
"database created in {:.2?} at: {:?}",
start.elapsed(),
command.database_path
);
if let Some(path) = command.compact_to_path {
fs::create_dir_all(&path)?;
let start = Instant::now();
let _file = database.copy_and_compact_to_path(path.join("data.mdb"))?;
println!(
"database compacted in {:.2?} at: {:?}",
start.elapsed(),
path
);
}
Ok(())
}
fn display_highlights(text: &str, ranges: &[usize]) -> io::Result<()> {
let mut stdout = StandardStream::stdout(ColorChoice::Always);
let mut highlighted = false;
for range in ranges.windows(2) {
let [start, end] = match range {
[start, end] => [*start, *end],
_ => unreachable!(),
};
if highlighted {
stdout.set_color(
ColorSpec::new()
.set_fg(Some(Color::Yellow))
.set_underline(true),
)?;
}
write!(&mut stdout, "{}", &text[start..end])?;
stdout.reset()?;
highlighted = !highlighted;
}
Ok(())
}
fn char_to_byte_range(index: usize, length: usize, text: &str) -> (usize, usize) {
let mut byte_index = 0;
let mut byte_length = 0;
for (n, (i, c)) in text.char_indices().enumerate() {
if n == index {
byte_index = i;
}
if n + 1 == index + length {
byte_length = i - byte_index + c.len_utf8();
break;
}
}
(byte_index, byte_length)
}
fn create_highlight_areas(text: &str, highlights: &[Highlight]) -> Vec<usize> {
let mut byte_indexes = BTreeMap::new();
for highlight in highlights {
let char_index = highlight.char_index as usize;
let char_length = highlight.char_length as usize;
let (byte_index, byte_length) = char_to_byte_range(char_index, char_length, text);
match byte_indexes.entry(byte_index) {
Entry::Vacant(entry) => {
entry.insert(byte_length);
}
Entry::Occupied(mut entry) => {
if *entry.get() < byte_length {
entry.insert(byte_length);
}
}
}
}
let mut title_areas = Vec::new();
title_areas.push(0);
for (byte_index, length) in byte_indexes {
title_areas.push(byte_index);
title_areas.push(byte_index + length);
}
title_areas.push(text.len());
title_areas.sort_unstable();
title_areas
}
/// note: matches must have been sorted by `char_index` and `char_length` before being passed.
///
/// ```no_run
/// matches.sort_unstable_by_key(|m| (m.char_index, m.char_length));
///
/// let matches = matches.matches.iter().filter(|m| SchemaAttr::new(m.attribute) == attr).cloned();
///
/// let (text, matches) = crop_text(&text, matches, 35);
/// ```
fn crop_text(
text: &str,
highlights: impl IntoIterator<Item = Highlight>,
context: usize,
) -> (String, Vec<Highlight>) {
let mut highlights = highlights.into_iter().peekable();
let char_index = highlights
.peek()
.map(|m| m.char_index as usize)
.unwrap_or(0);
let start = char_index.saturating_sub(context);
let text = text.chars().skip(start).take(context * 2).collect();
let highlights = highlights
.take_while(|m| (m.char_index as usize) + (m.char_length as usize) <= start + (context * 2))
.map(|highlight| Highlight {
char_index: highlight.char_index - start as u16,
..highlight
})
.collect();
(text, highlights)
}
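// Editor's sketch (added for illustration): cropping a 20-char string with
// context 5 around a first highlight at char 7 keeps chars 2..12 and shifts
// the highlight's char_index down by the crop start.
#[test]
fn crop_text_sketch() {
    let highlight = Highlight { attribute: 0, char_index: 7, char_length: 3 };
    let (text, highlights) = crop_text("aaaaabbbbbcccccddddd", vec![highlight], 5);
    assert_eq!(text, "aaabbbbbcc");
    assert_eq!(highlights[0].char_index, 5);
}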
fn search_command(command: SearchCommand, database: Database) -> Result<(), Box<dyn Error>> {
let db = &database;
let index = database
.open_index(&command.index_uid)
.expect("Could not find index");
let reader = db.main_read_txn().unwrap();
let schema = index.main.schema(&reader)?;
reader.abort();
let schema = schema.ok_or(meilisearch_core::Error::SchemaMissing)?;
let fields = command.displayed_fields.iter().map(String::as_str);
let fields = HashSet::from_iter(fields);
let config = Config::builder().auto_add_history(true).build();
let mut readline = Editor::<()>::with_config(config);
let _ = readline.load_history("query-history.txt");
for result in readline.iter("Searching for: ") {
match result {
Ok(query) => {
let start_total = Instant::now();
let reader = db.main_read_txn().unwrap();
let ref_index = &index;
let ref_reader = &reader;
let mut builder = index.query_builder();
if let Some(timeout) = command.fetch_timeout_ms {
builder.with_fetch_timeout(Duration::from_millis(timeout));
}
if let Some(ref filter) = command.filter {
let filter = filter.as_str();
let (positive, filter) = if filter.starts_with('!') {
(false, &filter[1..])
} else {
(true, filter)
};
let attr = schema
.id(filter)
.expect("Could not find filtered attribute");
builder.with_filter(move |document_id| {
let string: String = ref_index
.document_attribute(ref_reader, document_id, attr)
.unwrap()
.unwrap();
(string == "true") == positive
});
}
let (documents, _nb_hits) = builder.query(ref_reader, &query, 0..command.number_results)?;
let mut retrieve_duration = Duration::default();
let number_of_documents = documents.len();
for mut doc in documents {
doc.highlights
.sort_unstable_by_key(|m| (m.char_index, m.char_length));
let start_retrieve = Instant::now();
let result = index.document::<Document>(&reader, Some(&fields), doc.id);
retrieve_duration += start_retrieve.elapsed();
match result {
Ok(Some(document)) => {
println!("raw-id: {:?}", doc.id);
for (name, text) in document.0 {
print!("{}: ", name);
let attr = schema.id(&name).unwrap();
let highlights = doc
.highlights
.iter()
.filter(|m| FieldId::new(m.attribute) == attr)
.cloned();
let (text, highlights) =
crop_text(&text, highlights, command.char_context);
let areas = create_highlight_areas(&text, &highlights);
display_highlights(&text, &areas)?;
println!();
}
}
Ok(None) => eprintln!("missing document"),
Err(e) => eprintln!("{}", e),
}
let mut matching_attributes = HashSet::new();
for highlight in doc.highlights {
let attr = FieldId::new(highlight.attribute);
let name = schema.name(attr);
matching_attributes.insert(name);
}
let matching_attributes = Vec::from_iter(matching_attributes);
println!("matching in: {:?}", matching_attributes);
println!();
}
eprintln!(
"whole documents fields retrieve took {:.2?}",
retrieve_duration
);
eprintln!(
"===== Found {} results in {:.2?} =====",
number_of_documents,
start_total.elapsed()
);
}
Err(err) => {
println!("Error: {:?}", err);
break;
}
}
}
readline.save_history("query-history.txt").unwrap();
Ok(())
}
fn show_updates_command(
command: ShowUpdatesCommand,
database: Database,
) -> Result<(), Box<dyn Error>> {
let db = &database;
let index = database
.open_index(&command.index_uid)
.expect("Could not find index");
let reader = db.update_read_txn().unwrap();
let updates = index.all_updates_status(&reader)?;
println!("{:#?}", updates);
reader.abort();
Ok(())
}
fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
let opt = Command::from_args();
let database = Database::open_or_create(opt.path())?;
match opt {
Command::Index(command) => index_command(command, database),
Command::Search(command) => search_command(command, database),
Command::ShowUpdates(command) => show_updates_command(command, database),
}
}


@ -0,0 +1,53 @@
use levenshtein_automata::{LevenshteinAutomatonBuilder as LevBuilder, DFA};
use once_cell::sync::OnceCell;
static LEVDIST0: OnceCell<LevBuilder> = OnceCell::new();
static LEVDIST1: OnceCell<LevBuilder> = OnceCell::new();
static LEVDIST2: OnceCell<LevBuilder> = OnceCell::new();
#[derive(Copy, Clone)]
enum PrefixSetting {
Prefix,
NoPrefix,
}
fn build_dfa_with_setting(query: &str, setting: PrefixSetting) -> DFA {
use PrefixSetting::{NoPrefix, Prefix};
match query.len() {
0..=4 => {
let builder = LEVDIST0.get_or_init(|| LevBuilder::new(0, true));
match setting {
Prefix => builder.build_prefix_dfa(query),
NoPrefix => builder.build_dfa(query),
}
}
5..=8 => {
let builder = LEVDIST1.get_or_init(|| LevBuilder::new(1, true));
match setting {
Prefix => builder.build_prefix_dfa(query),
NoPrefix => builder.build_dfa(query),
}
}
_ => {
let builder = LEVDIST2.get_or_init(|| LevBuilder::new(2, true));
match setting {
Prefix => builder.build_prefix_dfa(query),
NoPrefix => builder.build_dfa(query),
}
}
}
}
pub fn build_prefix_dfa(query: &str) -> DFA {
build_dfa_with_setting(query, PrefixSetting::Prefix)
}
pub fn build_dfa(query: &str) -> DFA {
build_dfa_with_setting(query, PrefixSetting::NoPrefix)
}
pub fn build_exact_dfa(query: &str) -> DFA {
let builder = LEVDIST0.get_or_init(|| LevBuilder::new(0, true));
builder.build_dfa(query)
}
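// Editor's note (illustration only): the three buckets above encode the usual
// typo-tolerance rule: queries of up to 4 chars allow 0 typos, 5 to 8 chars
// allow 1, and longer queries allow 2. The prefix variant additionally
// accepts any continuation of the query, e.g.:
//
//     let dfa = build_prefix_dfa("hello"); // accepts "hello", "helo" (1 typo), "helloworld", ...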


@ -0,0 +1,15 @@
mod dfa;
use meilisearch_tokenizer::is_cjk;
pub use self::dfa::{build_dfa, build_prefix_dfa, build_exact_dfa};
pub fn normalize_str(string: &str) -> String {
let mut string = string.to_lowercase();
if !string.contains(is_cjk) {
string = deunicode::deunicode_with_tofu(&string, "");
}
string
}
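// Editor's sketch (added for illustration): lowercasing plus transliteration,
// the latter skipped for CJK text so those characters survive untouched.
#[test]
fn normalize_str_sketch() {
    assert_eq!(normalize_str("Héllo"), "hello");
}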


@ -0,0 +1,560 @@
use std::borrow::Cow;
use std::collections::HashMap;
use std::mem;
use std::ops::Deref;
use std::ops::Range;
use std::rc::Rc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
use std::fmt;
use compact_arena::{SmallArena, Idx32, mk_arena};
use log::debug;
use meilisearch_types::DocIndex;
use sdset::{Set, SetBuf, exponential_search};
use slice_group_by::{GroupBy, GroupByMut};
use crate::error::Error;
use crate::criterion::{Criteria, Context, ContextMut};
use crate::distinct_map::{BufferedDistinctMap, DistinctMap};
use crate::raw_document::RawDocument;
use crate::{database::MainT, reordered_attrs::ReorderedAttrs};
use crate::{store, Document, DocumentId, MResult};
use crate::query_tree::{create_query_tree, traverse_query_tree};
use crate::query_tree::{Operation, QueryResult, QueryKind, QueryId, PostingsKey};
use crate::query_tree::Context as QTContext;
pub fn bucket_sort<'c, FI>(
reader: &heed::RoTxn<MainT>,
query: &str,
range: Range<usize>,
filter: Option<FI>,
criteria: Criteria<'c>,
searchable_attrs: Option<ReorderedAttrs>,
main_store: store::Main,
postings_lists_store: store::PostingsLists,
documents_fields_counts_store: store::DocumentsFieldsCounts,
synonyms_store: store::Synonyms,
prefix_documents_cache_store: store::PrefixDocumentsCache,
prefix_postings_lists_cache_store: store::PrefixPostingsListsCache,
) -> MResult<(Vec<Document>, usize)>
where
FI: Fn(DocumentId) -> bool,
{
// We delegate the filter work to the distinct query builder,
// specifying a distinct rule that has no effect.
if filter.is_some() {
let distinct = |_| None;
let distinct_size = 1;
return bucket_sort_with_distinct(
reader,
query,
range,
filter,
distinct,
distinct_size,
criteria,
searchable_attrs,
main_store,
postings_lists_store,
documents_fields_counts_store,
synonyms_store,
prefix_documents_cache_store,
prefix_postings_lists_cache_store,
);
}
let words_set = match unsafe { main_store.static_words_fst(reader)? } {
Some(words) => words,
None => return Ok((Vec::new(), 0)),
};
let stop_words = main_store.stop_words_fst(reader)?.unwrap_or_default();
let context = QTContext {
words_set,
stop_words,
synonyms: synonyms_store,
postings_lists: postings_lists_store,
prefix_postings_lists: prefix_postings_lists_cache_store,
};
let (operation, mapping) = create_query_tree(reader, &context, query)?;
debug!("operation:\n{:?}", operation);
debug!("mapping:\n{:?}", mapping);
fn recurs_operation<'o>(map: &mut HashMap<QueryId, &'o QueryKind>, operation: &'o Operation) {
match operation {
Operation::And(ops) => ops.iter().for_each(|op| recurs_operation(map, op)),
Operation::Or(ops) => ops.iter().for_each(|op| recurs_operation(map, op)),
Operation::Query(query) => { map.insert(query.id, &query.kind); },
}
}
let mut queries_kinds = HashMap::new();
recurs_operation(&mut queries_kinds, &operation);
let QueryResult { docids, queries } = traverse_query_tree(reader, &context, &operation)?;
debug!("found {} documents", docids.len());
debug!("number of postings {:?}", queries.len());
let before = Instant::now();
mk_arena!(arena);
let mut bare_matches = cleanup_bare_matches(&mut arena, &docids, queries);
debug!("matches cleaned in {:.02?}", before.elapsed());
let before_bucket_sort = Instant::now();
let before_raw_documents_building = Instant::now();
let mut raw_documents = Vec::new();
for bare_matches in bare_matches.linear_group_by_key_mut(|sm| sm.document_id) {
let raw_document = RawDocument::new(bare_matches, &mut arena, searchable_attrs.as_ref());
raw_documents.push(raw_document);
}
debug!("creating {} candidates documents took {:.02?}",
raw_documents.len(),
before_raw_documents_building.elapsed(),
);
let before_criterion_loop = Instant::now();
let proximity_count = AtomicUsize::new(0);
let mut groups = vec![raw_documents.as_mut_slice()];
'criteria: for criterion in criteria.as_ref() {
let tmp_groups = mem::replace(&mut groups, Vec::new());
let mut documents_seen = 0;
for mut group in tmp_groups {
let before_criterion_preparation = Instant::now();
let ctx = ContextMut {
reader,
postings_lists: &mut arena,
query_mapping: &mapping,
documents_fields_counts_store,
};
criterion.prepare(ctx, &mut group)?;
debug!("{:?} preparation took {:.02?}", criterion.name(), before_criterion_preparation.elapsed());
let ctx = Context {
postings_lists: &arena,
query_mapping: &mapping,
};
let before_criterion_sort = Instant::now();
group.sort_unstable_by(|a, b| criterion.evaluate(&ctx, a, b));
debug!("{:?} evaluation took {:.02?}", criterion.name(), before_criterion_sort.elapsed());
for group in group.binary_group_by_mut(|a, b| criterion.eq(&ctx, a, b)) {
debug!("{:?} produced a group of size {}", criterion.name(), group.len());
documents_seen += group.len();
groups.push(group);
// we have sorted enough documents if the last sorted document lies beyond
// the end of the requested range; we can continue to the next criterion
if documents_seen >= range.end {
continue 'criteria;
}
}
}
}
debug!("criterion loop took {:.02?}", before_criterion_loop.elapsed());
debug!("proximity evaluation called {} times", proximity_count.load(Ordering::Relaxed));
let schema = main_store.schema(reader)?.ok_or(Error::SchemaMissing)?;
let iter = raw_documents.into_iter().skip(range.start).take(range.len());
let iter = iter.map(|rd| Document::from_raw(rd, &queries_kinds, &arena, searchable_attrs.as_ref(), &schema));
let documents = iter.collect();
debug!("bucket sort took {:.02?}", before_bucket_sort.elapsed());
Ok((documents, docids.len()))
}
pub fn bucket_sort_with_distinct<'c, FI, FD>(
reader: &heed::RoTxn<MainT>,
query: &str,
range: Range<usize>,
filter: Option<FI>,
distinct: FD,
distinct_size: usize,
criteria: Criteria<'c>,
searchable_attrs: Option<ReorderedAttrs>,
main_store: store::Main,
postings_lists_store: store::PostingsLists,
documents_fields_counts_store: store::DocumentsFieldsCounts,
synonyms_store: store::Synonyms,
_prefix_documents_cache_store: store::PrefixDocumentsCache,
prefix_postings_lists_cache_store: store::PrefixPostingsListsCache,
) -> MResult<(Vec<Document>, usize)>
where
FI: Fn(DocumentId) -> bool,
FD: Fn(DocumentId) -> Option<u64>,
{
let words_set = match unsafe { main_store.static_words_fst(reader)? } {
Some(words) => words,
None => return Ok((Vec::new(), 0)),
};
let stop_words = main_store.stop_words_fst(reader)?.unwrap_or_default();
let context = QTContext {
words_set,
stop_words,
synonyms: synonyms_store,
postings_lists: postings_lists_store,
prefix_postings_lists: prefix_postings_lists_cache_store,
};
let (operation, mapping) = create_query_tree(reader, &context, query)?;
debug!("operation:\n{:?}", operation);
debug!("mapping:\n{:?}", mapping);
fn recurs_operation<'o>(map: &mut HashMap<QueryId, &'o QueryKind>, operation: &'o Operation) {
match operation {
Operation::And(ops) => ops.iter().for_each(|op| recurs_operation(map, op)),
Operation::Or(ops) => ops.iter().for_each(|op| recurs_operation(map, op)),
Operation::Query(query) => { map.insert(query.id, &query.kind); },
}
}
let mut queries_kinds = HashMap::new();
recurs_operation(&mut queries_kinds, &operation);
let QueryResult { docids, queries } = traverse_query_tree(reader, &context, &operation)?;
debug!("found {} documents", docids.len());
debug!("number of postings {:?}", queries.len());
let before = Instant::now();
mk_arena!(arena);
let mut bare_matches = cleanup_bare_matches(&mut arena, &docids, queries);
debug!("matches cleaned in {:.02?}", before.elapsed());
let before_raw_documents_building = Instant::now();
let mut raw_documents = Vec::new();
for bare_matches in bare_matches.linear_group_by_key_mut(|sm| sm.document_id) {
let raw_document = RawDocument::new(bare_matches, &mut arena, searchable_attrs.as_ref());
raw_documents.push(raw_document);
}
debug!("creating {} candidates documents took {:.02?}",
raw_documents.len(),
before_raw_documents_building.elapsed(),
);
let mut groups = vec![raw_documents.as_mut_slice()];
let mut key_cache = HashMap::new();
let mut filter_map = HashMap::new();
// these two variables keep track of the current distinct map and
// of the raw offset of the start of the group where the
// range.start bound is located according to the distinct function
let mut distinct_map = DistinctMap::new(distinct_size);
let mut distinct_raw_offset = 0;
'criteria: for criterion in criteria.as_ref() {
let tmp_groups = mem::replace(&mut groups, Vec::new());
let mut buf_distinct = BufferedDistinctMap::new(&mut distinct_map);
let mut documents_seen = 0;
for mut group in tmp_groups {
// if this group does not overlap with the requested range,
// push it without sorting or splitting it
if documents_seen + group.len() < distinct_raw_offset {
documents_seen += group.len();
groups.push(group);
continue;
}
let ctx = ContextMut {
reader,
postings_lists: &mut arena,
query_mapping: &mapping,
documents_fields_counts_store,
};
let before_criterion_preparation = Instant::now();
criterion.prepare(ctx, &mut group)?;
debug!("{:?} preparation took {:.02?}", criterion.name(), before_criterion_preparation.elapsed());
let ctx = Context {
postings_lists: &arena,
query_mapping: &mapping,
};
let before_criterion_sort = Instant::now();
group.sort_unstable_by(|a, b| criterion.evaluate(&ctx, a, b));
debug!("{:?} evaluation took {:.02?}", criterion.name(), before_criterion_sort.elapsed());
for group in group.binary_group_by_mut(|a, b| criterion.eq(&ctx, a, b)) {
// we must compute the real distinct len of this sub-group
for document in group.iter() {
let filter_accepted = match &filter {
Some(filter) => {
let entry = filter_map.entry(document.id);
*entry.or_insert_with(|| (filter)(document.id))
}
None => true,
};
if filter_accepted {
let entry = key_cache.entry(document.id);
let key = entry.or_insert_with(|| (distinct)(document.id).map(Rc::new));
match key.clone() {
Some(key) => buf_distinct.register(key),
None => buf_distinct.register_without_key(),
};
}
// the requested range end is reached: stop computing distinct
if buf_distinct.len() >= range.end {
break;
}
}
documents_seen += group.len();
groups.push(group);
// if this sub-group does not overlap with the requested range
// we must update the distinct map and its start index
if buf_distinct.len() < range.start {
buf_distinct.transfert_to_internal();
distinct_raw_offset = documents_seen;
}
// we have sorted enough documents if the last sorted document lies beyond
// the end of the requested range; we can continue to the next criterion
if buf_distinct.len() >= range.end {
continue 'criteria;
}
}
}
}
// once we have classified the documents related to the current
// automatons, we save that as the next valid result
let mut seen = BufferedDistinctMap::new(&mut distinct_map);
let schema = main_store.schema(reader)?.ok_or(Error::SchemaMissing)?;
let mut documents = Vec::with_capacity(range.len());
for raw_document in raw_documents.into_iter().skip(distinct_raw_offset) {
let filter_accepted = match &filter {
Some(_) => filter_map.remove(&raw_document.id).unwrap(),
None => true,
};
if filter_accepted {
let key = key_cache.remove(&raw_document.id).unwrap();
let distinct_accepted = match key {
Some(key) => seen.register(key),
None => seen.register_without_key(),
};
if distinct_accepted && seen.len() > range.start {
documents.push(Document::from_raw(raw_document, &queries_kinds, &arena, searchable_attrs.as_ref(), &schema));
if documents.len() == range.len() {
break;
}
}
}
}
Ok((documents, docids.len()))
}
fn cleanup_bare_matches<'tag, 'txn>(
arena: &mut SmallArena<'tag, PostingsListView<'txn>>,
docids: &Set<DocumentId>,
queries: HashMap<PostingsKey, Cow<'txn, Set<DocIndex>>>,
) -> Vec<BareMatch<'tag>>
{
let docidslen = docids.len() as f32;
let mut bare_matches = Vec::new();
for (PostingsKey { query, input, distance, is_exact }, matches) in queries {
let postings_list_view = PostingsListView::original(Rc::from(input), Rc::new(matches));
let pllen = postings_list_view.len() as f32;
if docidslen / pllen >= 0.8 {
let mut offset = 0;
for matches in postings_list_view.linear_group_by_key(|m| m.document_id) {
let document_id = matches[0].document_id;
if docids.contains(&document_id) {
let range = postings_list_view.range(offset, matches.len());
let posting_list_index = arena.add(range);
let bare_match = BareMatch {
document_id,
query_index: query.id,
distance,
is_exact,
postings_list: posting_list_index,
};
bare_matches.push(bare_match);
}
offset += matches.len();
}
} else {
let mut offset = 0;
for id in docids.as_slice() {
let di = DocIndex { document_id: *id, ..DocIndex::default() };
let pos = exponential_search(&postings_list_view[offset..], &di).unwrap_or_else(|x| x);
offset += pos;
let group = postings_list_view[offset..]
.linear_group_by_key(|m| m.document_id)
.next()
.filter(|matches| matches[0].document_id == *id);
if let Some(matches) = group {
let range = postings_list_view.range(offset, matches.len());
let posting_list_index = arena.add(range);
let bare_match = BareMatch {
document_id: *id,
query_index: query.id,
distance,
is_exact,
postings_list: posting_list_index,
};
bare_matches.push(bare_match);
}
}
}
}
let before_raw_documents_presort = Instant::now();
bare_matches.sort_unstable_by_key(|sm| sm.document_id);
debug!("sort by documents ids took {:.02?}", before_raw_documents_presort.elapsed());
bare_matches
}
pub struct BareMatch<'tag> {
pub document_id: DocumentId,
pub query_index: usize,
pub distance: u8,
pub is_exact: bool,
pub postings_list: Idx32<'tag>,
}
impl fmt::Debug for BareMatch<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BareMatch")
.field("document_id", &self.document_id)
.field("query_index", &self.query_index)
.field("distance", &self.distance)
.field("is_exact", &self.is_exact)
.finish()
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct SimpleMatch {
pub query_index: usize,
pub distance: u8,
pub attribute: u16,
pub word_index: u16,
pub is_exact: bool,
}
#[derive(Clone)]
pub enum PostingsListView<'txn> {
Original {
input: Rc<[u8]>,
postings_list: Rc<Cow<'txn, Set<DocIndex>>>,
offset: usize,
len: usize,
},
Rewritten {
input: Rc<[u8]>,
postings_list: SetBuf<DocIndex>,
},
}
impl fmt::Debug for PostingsListView<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PostingsListView")
.field("input", &std::str::from_utf8(&self.input()).unwrap())
.field("postings_list", &self.as_ref())
.finish()
}
}
impl<'txn> PostingsListView<'txn> {
pub fn original(input: Rc<[u8]>, postings_list: Rc<Cow<'txn, Set<DocIndex>>>) -> PostingsListView<'txn> {
let len = postings_list.len();
PostingsListView::Original { input, postings_list, offset: 0, len }
}
pub fn rewritten(input: Rc<[u8]>, postings_list: SetBuf<DocIndex>) -> PostingsListView<'txn> {
PostingsListView::Rewritten { input, postings_list }
}
pub fn rewrite_with(&mut self, postings_list: SetBuf<DocIndex>) {
let input = match self {
PostingsListView::Original { input, .. } => input.clone(),
PostingsListView::Rewritten { input, .. } => input.clone(),
};
*self = PostingsListView::rewritten(input, postings_list);
}
pub fn len(&self) -> usize {
match self {
PostingsListView::Original { len, .. } => *len,
PostingsListView::Rewritten { postings_list, .. } => postings_list.len(),
}
}
pub fn input(&self) -> &[u8] {
match self {
PostingsListView::Original { ref input, .. } => input,
PostingsListView::Rewritten { ref input, .. } => input,
}
}
pub fn range(&self, range_offset: usize, range_len: usize) -> PostingsListView<'txn> {
match self {
PostingsListView::Original { input, postings_list, offset, len } => {
assert!(range_offset + range_len <= *len);
PostingsListView::Original {
input: input.clone(),
postings_list: postings_list.clone(),
offset: offset + range_offset,
len: range_len,
}
},
PostingsListView::Rewritten { .. } => {
panic!("Cannot create a range on a rewritten postings list view");
}
}
}
}
impl AsRef<Set<DocIndex>> for PostingsListView<'_> {
fn as_ref(&self) -> &Set<DocIndex> {
self
}
}
impl Deref for PostingsListView<'_> {
type Target = Set<DocIndex>;
fn deref(&self) -> &Set<DocIndex> {
match *self {
PostingsListView::Original { ref postings_list, offset, len, .. } => {
Set::new_unchecked(&postings_list[offset..offset + len])
},
PostingsListView::Rewritten { ref postings_list, .. } => postings_list,
}
}
}


@ -0,0 +1,37 @@
use std::cmp::Ordering;
use slice_group_by::GroupBy;
use crate::{RawDocument, MResult};
use crate::bucket_sort::SimpleMatch;
use super::{Criterion, Context, ContextMut, prepare_bare_matches};
pub struct Attribute;
impl Criterion for Attribute {
fn name(&self) -> &str { "attribute" }
fn prepare<'h, 'p, 'tag, 'txn, 'q, 'r>(
&self,
ctx: ContextMut<'h, 'p, 'tag, 'txn, 'q>,
documents: &mut [RawDocument<'r, 'tag>],
) -> MResult<()>
{
prepare_bare_matches(documents, ctx.postings_lists, ctx.query_mapping);
Ok(())
}
fn evaluate(&self, _ctx: &Context, lhs: &RawDocument, rhs: &RawDocument) -> Ordering {
#[inline]
fn sum_of_attribute(matches: &[SimpleMatch]) -> usize {
let mut sum_of_attribute = 0;
for group in matches.linear_group_by_key(|bm| bm.query_index) {
sum_of_attribute += group[0].attribute as usize;
}
sum_of_attribute
}
let lhs = sum_of_attribute(&lhs.processed_matches);
let rhs = sum_of_attribute(&rhs.processed_matches);
lhs.cmp(&rhs)
}
}
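// Editor's note (illustration only): for every query word, the group's best
// match contributes its attribute index, and lower sums win. A document
// matching both query words in attribute 0 (sum 0) therefore ranks before
// one matching them in attributes 0 and 2 (sum 2).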


@ -0,0 +1,16 @@
use std::cmp::Ordering;
use crate::RawDocument;
use super::{Criterion, Context};
pub struct DocumentId;
impl Criterion for DocumentId {
fn name(&self) -> &str { "stable document id" }
fn evaluate(&self, _ctx: &Context, lhs: &RawDocument, rhs: &RawDocument) -> Ordering {
let lhs = &lhs.id;
let rhs = &rhs.id;
lhs.cmp(rhs)
}
}


@ -0,0 +1,78 @@
use std::cmp::{Ordering, Reverse};
use std::collections::hash_map::{HashMap, Entry};
use meilisearch_schema::IndexedPos;
use slice_group_by::GroupBy;
use crate::{RawDocument, MResult};
use crate::bucket_sort::BareMatch;
use super::{Criterion, Context, ContextMut};
pub struct Exactness;
impl Criterion for Exactness {
fn name(&self) -> &str { "exactness" }
fn prepare<'h, 'p, 'tag, 'txn, 'q, 'r>(
&self,
ctx: ContextMut<'h, 'p, 'tag, 'txn, 'q>,
documents: &mut [RawDocument<'r, 'tag>],
) -> MResult<()>
{
let store = ctx.documents_fields_counts_store;
let reader = ctx.reader;
'documents: for doc in documents {
doc.bare_matches.sort_unstable_by_key(|bm| (bm.query_index, Reverse(bm.is_exact)));
// mark the document if we find a "one word field" that matches
let mut fields_counts = HashMap::new();
for group in doc.bare_matches.linear_group_by_key(|bm| bm.query_index) {
for group in group.linear_group_by_key(|bm| bm.is_exact) {
if !group[0].is_exact { break }
for bm in group {
for di in ctx.postings_lists[bm.postings_list].as_ref() {
let attr = IndexedPos(di.attribute);
let count = match fields_counts.entry(attr) {
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
let count = store.document_field_count(reader, doc.id, attr)?;
*entry.insert(count)
},
};
if count == Some(1) {
doc.contains_one_word_field = true;
continue 'documents
}
}
}
}
}
}
Ok(())
}
fn evaluate(&self, _ctx: &Context, lhs: &RawDocument, rhs: &RawDocument) -> Ordering {
#[inline]
fn sum_exact_query_words(matches: &[BareMatch]) -> usize {
let mut sum_exact_query_words = 0;
for group in matches.linear_group_by_key(|bm| bm.query_index) {
sum_exact_query_words += group[0].is_exact as usize;
}
sum_exact_query_words
}
// does it contain a "one word field"
lhs.contains_one_word_field.cmp(&rhs.contains_one_word_field).reverse()
// if not, prefer the document that contains the more exact words
.then_with(|| {
let lhs = sum_exact_query_words(&lhs.bare_matches);
let rhs = sum_exact_query_words(&rhs.bare_matches);
lhs.cmp(&rhs).reverse()
})
}
}


@ -0,0 +1,291 @@
use std::cmp::{self, Ordering};
use std::collections::HashMap;
use std::ops::Range;
use compact_arena::SmallArena;
use sdset::SetBuf;
use slice_group_by::GroupBy;
use crate::bucket_sort::{SimpleMatch, PostingsListView};
use crate::database::MainT;
use crate::query_tree::QueryId;
use crate::{store, RawDocument, MResult};
mod typo;
mod words;
mod proximity;
mod attribute;
mod words_position;
mod exactness;
mod document_id;
mod sort_by_attr;
pub use self::typo::Typo;
pub use self::words::Words;
pub use self::proximity::Proximity;
pub use self::attribute::Attribute;
pub use self::words_position::WordsPosition;
pub use self::exactness::Exactness;
pub use self::document_id::DocumentId;
pub use self::sort_by_attr::SortByAttr;
pub trait Criterion {
fn name(&self) -> &str;
fn prepare<'h, 'p, 'tag, 'txn, 'q, 'r>(
&self,
_ctx: ContextMut<'h, 'p, 'tag, 'txn, 'q>,
_documents: &mut [RawDocument<'r, 'tag>],
) -> MResult<()>
{
Ok(())
}
fn evaluate<'p, 'tag, 'txn, 'q, 'r>(
&self,
ctx: &Context<'p, 'tag, 'txn, 'q>,
lhs: &RawDocument<'r, 'tag>,
rhs: &RawDocument<'r, 'tag>,
) -> Ordering;
#[inline]
fn eq<'p, 'tag, 'txn, 'q, 'r>(
&self,
ctx: &Context<'p, 'tag, 'txn, 'q>,
lhs: &RawDocument<'r, 'tag>,
rhs: &RawDocument<'r, 'tag>,
) -> bool
{
self.evaluate(ctx, lhs, rhs) == Ordering::Equal
}
}
pub struct ContextMut<'h, 'p, 'tag, 'txn, 'q> {
pub reader: &'h heed::RoTxn<MainT>,
pub postings_lists: &'p mut SmallArena<'tag, PostingsListView<'txn>>,
pub query_mapping: &'q HashMap<QueryId, Range<usize>>,
pub documents_fields_counts_store: store::DocumentsFieldsCounts,
}
pub struct Context<'p, 'tag, 'txn, 'q> {
pub postings_lists: &'p SmallArena<'tag, PostingsListView<'txn>>,
pub query_mapping: &'q HashMap<QueryId, Range<usize>>,
}
#[derive(Default)]
pub struct CriteriaBuilder<'a> {
inner: Vec<Box<dyn Criterion + 'a>>,
}
impl<'a> CriteriaBuilder<'a> {
pub fn new() -> CriteriaBuilder<'a> {
CriteriaBuilder { inner: Vec::new() }
}
pub fn with_capacity(capacity: usize) -> CriteriaBuilder<'a> {
CriteriaBuilder {
inner: Vec::with_capacity(capacity),
}
}
pub fn reserve(&mut self, additional: usize) {
self.inner.reserve(additional)
}
pub fn add<C: 'a>(mut self, criterion: C) -> CriteriaBuilder<'a>
where
C: Criterion,
{
self.push(criterion);
self
}
pub fn push<C: 'a>(&mut self, criterion: C)
where
C: Criterion,
{
self.inner.push(Box::new(criterion));
}
pub fn build(self) -> Criteria<'a> {
Criteria { inner: self.inner }
}
}
pub struct Criteria<'a> {
inner: Vec<Box<dyn Criterion + 'a>>,
}
impl<'a> Default for Criteria<'a> {
fn default() -> Self {
CriteriaBuilder::with_capacity(7)
.add(Typo)
.add(Words)
.add(Proximity)
.add(Attribute)
.add(WordsPosition)
.add(Exactness)
.add(DocumentId)
.build()
}
}
impl<'a> AsRef<[Box<dyn Criterion + 'a>]> for Criteria<'a> {
fn as_ref(&self) -> &[Box<dyn Criterion + 'a>] {
&self.inner
}
}
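// Editor's sketch (illustration only): composing a custom ranking with the
// builder defined above, here dropping Exactness while keeping the stable
// DocumentId tie-breaker last:
//
//     let criteria = CriteriaBuilder::with_capacity(6)
//         .add(Typo)
//         .add(Words)
//         .add(Proximity)
//         .add(Attribute)
//         .add(WordsPosition)
//         .add(DocumentId)
//         .build();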
fn prepare_query_distances<'a, 'tag, 'txn>(
documents: &mut [RawDocument<'a, 'tag>],
query_mapping: &HashMap<QueryId, Range<usize>>,
postings_lists: &SmallArena<'tag, PostingsListView<'txn>>,
) {
for document in documents {
if !document.processed_distances.is_empty() { continue }
let mut processed = Vec::new();
for m in document.bare_matches.iter() {
if postings_lists[m.postings_list].is_empty() { continue }
let range = query_mapping[&(m.query_index as usize)].clone();
let new_len = cmp::max(range.end as usize, processed.len());
processed.resize(new_len, None);
for index in range {
let index = index as usize;
processed[index] = match processed[index] {
Some(distance) if distance > m.distance => Some(m.distance),
Some(distance) => Some(distance),
None => Some(m.distance),
};
}
}
document.processed_distances = processed;
}
}
fn prepare_bare_matches<'a, 'tag, 'txn>(
documents: &mut [RawDocument<'a, 'tag>],
postings_lists: &mut SmallArena<'tag, PostingsListView<'txn>>,
query_mapping: &HashMap<QueryId, Range<usize>>,
) {
for document in documents {
if !document.processed_matches.is_empty() { continue }
let mut processed = Vec::new();
for m in document.bare_matches.iter() {
let postings_list = &postings_lists[m.postings_list];
processed.reserve(postings_list.len());
for di in postings_list.as_ref() {
let simple_match = SimpleMatch {
query_index: m.query_index,
distance: m.distance,
attribute: di.attribute,
word_index: di.word_index,
is_exact: m.is_exact,
};
processed.push(simple_match);
}
}
let processed = multiword_rewrite_matches(&mut processed, query_mapping);
document.processed_matches = processed.into_vec();
}
}
fn multiword_rewrite_matches(
matches: &mut [SimpleMatch],
query_mapping: &HashMap<QueryId, Range<usize>>,
) -> SetBuf<SimpleMatch>
{
matches.sort_unstable_by_key(|m| (m.attribute, m.word_index));
let mut padded_matches = Vec::with_capacity(matches.len());
// let before_padding = Instant::now();
// for each attribute of each document
for same_document_attribute in matches.linear_group_by_key(|m| m.attribute) {
// padding will only be applied
// to word indices in the same attribute
let mut padding = 0;
let mut iter = same_document_attribute.linear_group_by_key(|m| m.word_index);
// for each match at the same position
// in this document attribute
while let Some(same_word_index) = iter.next() {
// find the biggest padding
let mut biggest = 0;
for match_ in same_word_index {
let mut replacement = query_mapping[&(match_.query_index as usize)].clone();
let replacement_len = replacement.len();
let nexts = iter.remainder().linear_group_by_key(|m| m.word_index);
if let Some(query_index) = replacement.next() {
let word_index = match_.word_index + padding as u16;
let match_ = SimpleMatch { query_index, word_index, ..*match_ };
padded_matches.push(match_);
}
let mut found = false;
// look ahead and if there already is a match
// corresponding to this padding word, abort the padding
'padding: for (x, next_group) in nexts.enumerate() {
for (i, query_index) in replacement.clone().enumerate().skip(x) {
let word_index = match_.word_index + padding as u16 + (i + 1) as u16;
let padmatch = SimpleMatch { query_index, word_index, ..*match_ };
for nmatch_ in next_group {
let mut rep = query_mapping[&(nmatch_.query_index as usize)].clone();
let query_index = rep.next().unwrap();
if query_index == padmatch.query_index {
if !found {
// if we find a corresponding padding for the
// first time we must push preceding paddings
for (i, query_index) in replacement.clone().enumerate().take(i) {
let word_index = match_.word_index + padding as u16 + (i + 1) as u16;
let match_ = SimpleMatch { query_index, word_index, ..*match_ };
padded_matches.push(match_);
biggest = biggest.max(i + 1);
}
}
padded_matches.push(padmatch);
found = true;
continue 'padding;
}
}
}
// if we do not find a corresponding padding in the
// next groups, stop here and pad what was found
break;
}
if !found {
// if no padding was found in the following matches
// we must insert the entire padding
for (i, query_index) in replacement.enumerate() {
let word_index = match_.word_index + padding as u16 + (i + 1) as u16;
let match_ = SimpleMatch { query_index, word_index, ..*match_ };
padded_matches.push(match_);
}
biggest = biggest.max(replacement_len - 1);
}
}
padding += biggest;
}
}
// debug!("padding matches took {:.02?}", before_padding.elapsed());
// With this check we can see that the loop above takes something
// like 43% of the search time even when no rewrite is needed.
// assert_eq!(before_matches, padded_matches);
SetBuf::from_dirty(padded_matches)
}
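// Editor's note (illustration only): padding matters when one indexed word
// expands into a multi-word query, e.g. a hypothetical synonym
// "NYC" => "new york city". The single stored word then covers several query
// indices, and the matches that follow it in the same attribute must be
// shifted by the replacement length so that word distances stay meaningful
// for the proximity criterion.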


@ -0,0 +1,68 @@
use std::cmp::{self, Ordering};
use slice_group_by::GroupBy;
use crate::bucket_sort::{SimpleMatch};
use crate::{RawDocument, MResult};
use super::{Criterion, Context, ContextMut, prepare_bare_matches};
const MAX_DISTANCE: u16 = 8;
pub struct Proximity;
impl Criterion for Proximity {
fn name(&self) -> &str { "proximity" }
fn prepare<'h, 'p, 'tag, 'txn, 'q, 'r>(
&self,
ctx: ContextMut<'h, 'p, 'tag, 'txn, 'q>,
documents: &mut [RawDocument<'r, 'tag>],
) -> MResult<()>
{
prepare_bare_matches(documents, ctx.postings_lists, ctx.query_mapping);
Ok(())
}
fn evaluate(&self, _ctx: &Context, lhs: &RawDocument, rhs: &RawDocument) -> Ordering {
fn index_proximity(lhs: u16, rhs: u16) -> u16 {
if lhs < rhs {
cmp::min(rhs - lhs, MAX_DISTANCE)
} else {
cmp::min(lhs - rhs, MAX_DISTANCE) + 1
}
}
fn attribute_proximity(lhs: SimpleMatch, rhs: SimpleMatch) -> u16 {
if lhs.attribute != rhs.attribute { MAX_DISTANCE }
else { index_proximity(lhs.word_index, rhs.word_index) }
}
fn min_proximity(lhs: &[SimpleMatch], rhs: &[SimpleMatch]) -> u16 {
let mut min_prox = u16::max_value();
for a in lhs {
for b in rhs {
let prox = attribute_proximity(*a, *b);
min_prox = cmp::min(min_prox, prox);
}
}
min_prox
}
fn matches_proximity(matches: &[SimpleMatch]) -> u16 {
let mut proximity = 0;
let mut iter = matches.linear_group_by_key(|m| m.query_index);
// iterate over groups by windows of size 2
let mut last = iter.next();
while let (Some(lhs), Some(rhs)) = (last, iter.next()) {
proximity += min_proximity(lhs, rhs);
last = Some(rhs);
}
proximity
}
let lhs = matches_proximity(&lhs.processed_matches);
let rhs = matches_proximity(&rhs.processed_matches);
lhs.cmp(&rhs)
}
}
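// Editor's note (illustration only): index_proximity is deliberately
// asymmetric so that words appearing in query order count as closer.
// Matches at word indices 3 and 5 in the same attribute cost min(5 - 3, 8) = 2,
// the reversed order costs min(2, 8) + 1 = 3, and matches in different
// attributes always cost MAX_DISTANCE = 8.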


@ -0,0 +1,129 @@
use std::cmp::Ordering;
use std::error::Error;
use std::fmt;
use meilisearch_schema::{Schema, FieldId};
use crate::{RankedMap, RawDocument};
use super::{Criterion, Context};
/// A helper struct that permits sorting documents by
/// some of their stored attributes.
///
/// # Note
///
/// If a document cannot be deserialized it will be considered [`None`][].
///
/// Deserialized documents are compared like `Some(doc0).cmp(&Some(doc1))`,
/// so you should check the [`Ord`] implementation of `Option`.
///
/// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
/// [`Ord`]: https://doc.rust-lang.org/std/option/enum.Option.html#impl-Ord
///
/// # Example
///
/// ```ignore
/// use serde_derive::Deserialize;
/// use meilisearch::rank::criterion::*;
///
/// let custom_ranking = SortByAttr::lower_is_better(&ranked_map, &schema, "published_at")?;
///
/// let builder = CriteriaBuilder::with_capacity(8)
/// .add(Typo)
/// .add(Words)
/// .add(Proximity)
/// .add(Attribute)
/// .add(WordsPosition)
/// .add(Exactness)
/// .add(custom_ranking)
/// .add(DocumentId);
///
/// let criterion = builder.build();
///
/// ```
pub struct SortByAttr<'a> {
ranked_map: &'a RankedMap,
field_id: FieldId,
reversed: bool,
}
impl<'a> SortByAttr<'a> {
pub fn lower_is_better(
ranked_map: &'a RankedMap,
schema: &Schema,
attr_name: &str,
) -> Result<SortByAttr<'a>, SortByAttrError> {
SortByAttr::new(ranked_map, schema, attr_name, false)
}
pub fn higher_is_better(
ranked_map: &'a RankedMap,
schema: &Schema,
attr_name: &str,
) -> Result<SortByAttr<'a>, SortByAttrError> {
SortByAttr::new(ranked_map, schema, attr_name, true)
}
fn new(
ranked_map: &'a RankedMap,
schema: &Schema,
attr_name: &str,
reversed: bool,
) -> Result<SortByAttr<'a>, SortByAttrError> {
let field_id = match schema.id(attr_name) {
Some(field_id) => field_id,
None => return Err(SortByAttrError::AttributeNotFound),
};
if !schema.is_ranked(field_id) {
return Err(SortByAttrError::AttributeNotRegisteredForRanking);
}
Ok(SortByAttr {
ranked_map,
field_id,
reversed,
})
}
}
impl Criterion for SortByAttr<'_> {
fn name(&self) -> &str {
"sort by attribute"
}
fn evaluate(&self, _ctx: &Context, lhs: &RawDocument, rhs: &RawDocument) -> Ordering {
let lhs = self.ranked_map.get(lhs.id, self.field_id);
let rhs = self.ranked_map.get(rhs.id, self.field_id);
match (lhs, rhs) {
(Some(lhs), Some(rhs)) => {
let order = lhs.cmp(&rhs);
if self.reversed {
order.reverse()
} else {
order
}
}
(None, Some(_)) => Ordering::Greater,
(Some(_), None) => Ordering::Less,
(None, None) => Ordering::Equal,
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SortByAttrError {
AttributeNotFound,
AttributeNotRegisteredForRanking,
}
impl fmt::Display for SortByAttrError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use SortByAttrError::*;
match self {
AttributeNotFound => f.write_str("attribute not found in the schema"),
AttributeNotRegisteredForRanking => f.write_str("attribute not registered for ranking"),
}
}
}
impl Error for SortByAttrError {}


@ -0,0 +1,55 @@
use std::cmp::Ordering;
use crate::{RawDocument, MResult};
use super::{Criterion, Context, ContextMut, prepare_query_distances};
pub struct Typo;
impl Criterion for Typo {
fn name(&self) -> &str { "typo" }
fn prepare<'h, 'p, 'tag, 'txn, 'q, 'r>(
&self,
ctx: ContextMut<'h, 'p, 'tag, 'txn, 'q>,
documents: &mut [RawDocument<'r, 'tag>],
) -> MResult<()>
{
prepare_query_distances(documents, ctx.query_mapping, ctx.postings_lists);
Ok(())
}
fn evaluate(&self, _ctx: &Context, lhs: &RawDocument, rhs: &RawDocument) -> Ordering {
// This function is a deliberately inaccurate base-10 logarithm.
// It is safe to panic on inputs higher than 3,
// as the number of typos never exceeds that.
#[inline]
fn custom_log10(n: u8) -> f32 {
match n {
0 => 0.0, // log(1)
1 => 0.30102, // log(2)
2 => 0.47712, // log(3)
3 => 0.60205, // log(4)
_ => panic!("invalid number"),
}
}
#[inline]
fn compute_typos(distances: &[Option<u8>]) -> usize {
let mut number_words: usize = 0;
let mut sum_typos = 0.0;
for distance in distances {
if let Some(distance) = distance {
sum_typos += custom_log10(*distance);
number_words += 1;
}
}
(number_words as f32 / (sum_typos + 1.0) * 1000.0) as usize
}
let lhs = compute_typos(&lhs.processed_distances);
let rhs = compute_typos(&rhs.processed_distances);
lhs.cmp(&rhs).reverse()
}
}
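// Editor's note (illustration only, using custom_log10 above): a document
// matching two query words exactly scores (2 / (0.0 + 1.0)) * 1000 = 2000,
// while one whose second word carries one typo scores
// (2 / (0.30102 + 1.0)) * 1000 = 1537; the comparison is reversed, so the
// exact document ranks first.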


@ -0,0 +1,31 @@
use std::cmp::Ordering;
use crate::{RawDocument, MResult};
use super::{Criterion, Context, ContextMut, prepare_query_distances};
pub struct Words;
impl Criterion for Words {
fn name(&self) -> &str { "words" }
fn prepare<'h, 'p, 'tag, 'txn, 'q, 'r>(
&self,
ctx: ContextMut<'h, 'p, 'tag, 'txn, 'q>,
documents: &mut [RawDocument<'r, 'tag>],
) -> MResult<()>
{
prepare_query_distances(documents, ctx.query_mapping, ctx.postings_lists);
Ok(())
}
fn evaluate(&self, _ctx: &Context, lhs: &RawDocument, rhs: &RawDocument) -> Ordering {
#[inline]
fn number_of_query_words(distances: &[Option<u8>]) -> usize {
distances.iter().cloned().filter(Option::is_some).count()
}
let lhs = number_of_query_words(&lhs.processed_distances);
let rhs = number_of_query_words(&rhs.processed_distances);
lhs.cmp(&rhs).reverse()
}
}


@ -0,0 +1,37 @@
use std::cmp::Ordering;
use slice_group_by::GroupBy;
use crate::bucket_sort::SimpleMatch;
use crate::{RawDocument, MResult};
use super::{Criterion, Context, ContextMut, prepare_bare_matches};
pub struct WordsPosition;
impl Criterion for WordsPosition {
fn name(&self) -> &str { "words position" }
fn prepare<'h, 'p, 'tag, 'txn, 'q, 'r>(
&self,
ctx: ContextMut<'h, 'p, 'tag, 'txn, 'q>,
documents: &mut [RawDocument<'r, 'tag>],
) -> MResult<()>
{
prepare_bare_matches(documents, ctx.postings_lists, ctx.query_mapping);
Ok(())
}
fn evaluate(&self, _ctx: &Context, lhs: &RawDocument, rhs: &RawDocument) -> Ordering {
#[inline]
fn sum_words_position(matches: &[SimpleMatch]) -> usize {
let mut sum_words_position = 0;
for group in matches.linear_group_by_key(|bm| bm.query_index) {
sum_words_position += group[0].word_index as usize;
}
sum_words_position
}
let lhs = sum_words_position(&lhs.processed_matches);
let rhs = sum_words_position(&rhs.processed_matches);
lhs.cmp(&rhs)
}
}

File diff suppressed because it is too large


@ -1,6 +1,5 @@
use std::hash::Hash;
use hashbrown::HashMap;
use std::hash::Hash;
pub struct DistinctMap<K> {
inner: HashMap<K, usize>,
@ -12,7 +11,7 @@ impl<K: Hash + Eq> DistinctMap<K> {
pub fn new(limit: usize) -> Self {
DistinctMap {
inner: HashMap::new(),
limit: limit,
limit,
len: 0,
}
}
@ -31,7 +30,7 @@ pub struct BufferedDistinctMap<'a, K> {
impl<'a, K: Hash + Eq> BufferedDistinctMap<'a, K> {
pub fn new(internal: &'a mut DistinctMap<K>) -> BufferedDistinctMap<'a, K> {
BufferedDistinctMap {
internal: internal,
internal,
inner: HashMap::new(),
len: 0,
}


@ -0,0 +1,158 @@
use crate::serde::{DeserializerError, SerializerError};
use serde_json::Error as SerdeJsonError;
use pest::error::Error as PestError;
use crate::filters::Rule;
use std::{error, fmt, io};
pub use bincode::Error as BincodeError;
pub use fst::Error as FstError;
pub use heed::Error as HeedError;
pub use pest::error as pest_error;
pub type MResult<T> = Result<T, Error>;
#[derive(Debug)]
pub enum Error {
Io(io::Error),
IndexAlreadyExists,
MissingPrimaryKey,
SchemaMissing,
WordIndexMissing,
MissingDocumentId,
MaxFieldsLimitExceeded,
Schema(meilisearch_schema::Error),
Zlmdb(heed::Error),
Fst(fst::Error),
SerdeJson(SerdeJsonError),
Bincode(bincode::Error),
Serializer(SerializerError),
Deserializer(DeserializerError),
UnsupportedOperation(UnsupportedOperation),
FilterParseError(PestError<Rule>)
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::Io(error)
}
}
impl From<PestError<Rule>> for Error {
fn from(error: PestError<Rule>) -> Error {
Error::FilterParseError(error.renamed_rules(|r| {
let s = match r {
Rule::or => "OR",
Rule::and => "AND",
Rule::not => "NOT",
Rule::string => "string",
Rule::word => "word",
Rule::greater => "field > value",
Rule::less => "field < value",
Rule::eq => "field = value",
Rule::leq => "field <= value",
Rule::geq => "field >= value",
Rule::key => "key",
_ => "other",
};
s.to_string()
}))
}
}
impl From<meilisearch_schema::Error> for Error {
fn from(error: meilisearch_schema::Error) -> Error {
Error::Schema(error)
}
}
impl From<HeedError> for Error {
fn from(error: HeedError) -> Error {
Error::Zlmdb(error)
}
}
impl From<FstError> for Error {
fn from(error: FstError) -> Error {
Error::Fst(error)
}
}
impl From<SerdeJsonError> for Error {
fn from(error: SerdeJsonError) -> Error {
Error::SerdeJson(error)
}
}
impl From<BincodeError> for Error {
fn from(error: BincodeError) -> Error {
Error::Bincode(error)
}
}
impl From<SerializerError> for Error {
fn from(error: SerializerError) -> Error {
Error::Serializer(error)
}
}
impl From<DeserializerError> for Error {
fn from(error: DeserializerError) -> Error {
Error::Deserializer(error)
}
}
impl From<UnsupportedOperation> for Error {
fn from(op: UnsupportedOperation) -> Error {
Error::UnsupportedOperation(op)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
Io(e) => write!(f, "{}", e),
IndexAlreadyExists => write!(f, "index already exists"),
MissingPrimaryKey => write!(f, "schema cannot be built without a primary key"),
SchemaMissing => write!(f, "this index does not have a schema"),
WordIndexMissing => write!(f, "this index does not have a word index"),
MissingDocumentId => write!(f, "document id is missing"),
MaxFieldsLimitExceeded => write!(f, "maximum number of fields in a document exceeded"),
Schema(e) => write!(f, "schema error; {}", e),
Zlmdb(e) => write!(f, "heed error; {}", e),
Fst(e) => write!(f, "fst error; {}", e),
SerdeJson(e) => write!(f, "serde json error; {}", e),
Bincode(e) => write!(f, "bincode error; {}", e),
Serializer(e) => write!(f, "serializer error; {}", e),
Deserializer(e) => write!(f, "deserializer error; {}", e),
UnsupportedOperation(op) => write!(f, "unsupported operation; {}", op),
FilterParseError(e) => write!(f, "error parsing filter; {}", e),
}
}
}
impl error::Error for Error {}
#[derive(Debug)]
pub enum UnsupportedOperation {
SchemaAlreadyExists,
CannotUpdateSchemaPrimaryKey,
CannotReorderSchemaAttribute,
CanOnlyIntroduceNewSchemaAttributesAtEnd,
CannotRemoveSchemaAttribute,
}
impl fmt::Display for UnsupportedOperation {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::UnsupportedOperation::*;
match self {
SchemaAlreadyExists => write!(f, "Cannot update an index which already has a schema"),
CannotUpdateSchemaPrimaryKey => write!(f, "Cannot update the primary key of a schema"),
CannotReorderSchemaAttribute => write!(f, "Cannot reorder the attributes of a schema"),
CanOnlyIntroduceNewSchemaAttributesAtEnd => {
write!(f, "Can only introduce new attributes at the end of a schema")
}
CannotRemoveSchemaAttribute => write!(f, "Cannot remove attributes from a schema"),
}
}
}


@ -0,0 +1,277 @@
use std::str::FromStr;
use std::cmp::Ordering;
use crate::error::Error;
use crate::{store::Index, DocumentId, MainT};
use heed::RoTxn;
use meilisearch_schema::{FieldId, Schema};
use pest::error::{Error as PestError, ErrorVariant};
use pest::iterators::Pair;
use serde_json::{Value, Number};
use super::parser::Rule;
#[derive(Debug, PartialEq)]
enum ConditionType {
Greater,
Less,
Equal,
LessEqual,
GreaterEqual,
NotEqual,
}
/// We need to infer the type when the filter is constructed
/// and match every possible type it can be parsed into.
#[derive(Debug)]
struct ConditionValue<'a> {
string: &'a str,
boolean: Option<bool>,
number: Option<Number>
}
impl<'a> ConditionValue<'a> {
pub fn new(value: &Pair<'a, Rule>) -> Self {
let value = match value.as_rule() {
Rule::string | Rule::word => {
let string = value.as_str();
let boolean = match value.as_str() {
"true" => Some(true),
"false" => Some(false),
_ => None,
};
let number = Number::from_str(value.as_str()).ok();
ConditionValue { string, boolean, number }
},
_ => unreachable!(),
};
value
}
pub fn as_str(&self) -> &str {
self.string.as_ref()
}
pub fn as_number(&self) -> Option<&Number> {
self.number.as_ref()
}
pub fn as_bool(&self) -> Option<bool> {
self.boolean
}
}
#[derive(Debug)]
pub struct Condition<'a> {
field: FieldId,
condition: ConditionType,
value: ConditionValue<'a>
}
fn get_field_value<'a>(schema: &Schema, pair: Pair<'a, Rule>) -> Result<(FieldId, ConditionValue<'a>), Error> {
let mut items = pair.into_inner();
// lexing ensures that we at least have a key
let key = items.next().unwrap();
let field = schema
.id(key.as_str())
.ok_or::<PestError<Rule>>(PestError::new_from_span(
ErrorVariant::CustomError {
message: format!(
"attribute `{}` not found, available attributes are: {}",
key.as_str(),
schema.names().collect::<Vec<_>>().join(", ")
),
},
key.as_span()))?;
let value = ConditionValue::new(&items.next().unwrap());
Ok((field, value))
}
// beware: comparing big numbers can be lossy, as f64 loses integer precision above 2^53
fn compare_numbers(lhs: &Number, rhs: &Number) -> Option<Ordering> {
match (lhs.as_i64(), lhs.as_u64(), lhs.as_f64(),
rhs.as_i64(), rhs.as_u64(), rhs.as_f64()) {
// i64 u64 f64 i64 u64 f64
(Some(lhs), _, _, Some(rhs), _, _) => lhs.partial_cmp(&rhs),
(_, Some(lhs), _, _, Some(rhs), _) => lhs.partial_cmp(&rhs),
(_, _, Some(lhs), _, _, Some(rhs)) => lhs.partial_cmp(&rhs),
(_, _, _, _, _, _) => None,
}
}
impl<'a> Condition<'a> {
pub fn less(
item: Pair<'a, Rule>,
schema: &'a Schema,
) -> Result<Self, Error> {
let (field, value) = get_field_value(schema, item)?;
let condition = ConditionType::Less;
Ok(Self { field, condition, value })
}
pub fn greater(
item: Pair<'a, Rule>,
schema: &'a Schema,
) -> Result<Self, Error> {
let (field, value) = get_field_value(schema, item)?;
let condition = ConditionType::Greater;
Ok(Self { field, condition, value })
}
pub fn neq(
item: Pair<'a, Rule>,
schema: &'a Schema,
) -> Result<Self, Error> {
let (field, value) = get_field_value(schema, item)?;
let condition = ConditionType::NotEqual;
Ok(Self { field, condition, value })
}
pub fn geq(
item: Pair<'a, Rule>,
schema: &'a Schema,
) -> Result<Self, Error> {
let (field, value) = get_field_value(schema, item)?;
let condition = ConditionType::GreaterEqual;
Ok(Self { field, condition, value })
}
pub fn leq(
item: Pair<'a, Rule>,
schema: &'a Schema,
) -> Result<Self, Error> {
let (field, value) = get_field_value(schema, item)?;
let condition = ConditionType::LessEqual;
Ok(Self { field, condition, value })
}
pub fn eq(
item: Pair<'a, Rule>,
schema: &'a Schema,
) -> Result<Self, Error> {
let (field, value) = get_field_value(schema, item)?;
let condition = ConditionType::Equal;
Ok(Self { field, condition, value })
}
pub fn test(
&self,
reader: &RoTxn<MainT>,
index: &Index,
document_id: DocumentId,
) -> Result<bool, Error> {
match index.document_attribute::<Value>(reader, document_id, self.field)? {
Some(Value::Array(values)) => Ok(values.iter().any(|v| self.match_value(Some(v)))),
other => Ok(self.match_value(other.as_ref())),
}
}
fn match_value(&self, value: Option<&Value>) -> bool {
match value {
Some(Value::String(s)) => {
let value = self.value.as_str();
match self.condition {
ConditionType::Equal => unicase::eq(value, &s),
ConditionType::NotEqual => !unicase::eq(value, &s),
_ => false
}
},
Some(Value::Number(n)) => {
if let Some(value) = self.value.as_number() {
if let Some(ord) = compare_numbers(&n, value) {
let res = match self.condition {
ConditionType::Equal => ord == Ordering::Equal,
ConditionType::NotEqual => ord != Ordering::Equal,
ConditionType::GreaterEqual => ord != Ordering::Less,
ConditionType::LessEqual => ord != Ordering::Greater,
ConditionType::Greater => ord == Ordering::Greater,
ConditionType::Less => ord == Ordering::Less,
};
return res
}
}
false
},
Some(Value::Bool(b)) => {
if let Some(value) = self.value.as_bool() {
let res = match self.condition {
ConditionType::Equal => *b == value,
ConditionType::NotEqual => *b != value,
_ => false
};
return res
}
false
},
// if field is not supported (or not found), all values are different from it,
// so != should always return true in this case.
_ => self.condition == ConditionType::NotEqual,
}
}
}
#[cfg(test)]
mod test {
use super::*;
use serde_json::Number;
use std::cmp::Ordering;
#[test]
fn test_number_comp() {
// test both u64
let n1 = Number::from(1u64);
let n2 = Number::from(2u64);
assert_eq!(Some(Ordering::Less), compare_numbers(&n1, &n2));
assert_eq!(Some(Ordering::Greater), compare_numbers(&n2, &n1));
let n1 = Number::from(1u64);
let n2 = Number::from(1u64);
assert_eq!(Some(Ordering::Equal), compare_numbers(&n1, &n2));
// test both i64
let n1 = Number::from(1i64);
let n2 = Number::from(2i64);
assert_eq!(Some(Ordering::Less), compare_numbers(&n1, &n2));
assert_eq!(Some(Ordering::Greater), compare_numbers(&n2, &n1));
let n1 = Number::from(1i64);
let n2 = Number::from(1i64);
assert_eq!(Some(Ordering::Equal), compare_numbers(&n1, &n2));
// test both f64
let n1 = Number::from_f64(1f64).unwrap();
let n2 = Number::from_f64(2f64).unwrap();
assert_eq!(Some(Ordering::Less), compare_numbers(&n1, &n2));
assert_eq!(Some(Ordering::Greater), compare_numbers(&n2, &n1));
let n1 = Number::from_f64(1f64).unwrap();
let n2 = Number::from_f64(1f64).unwrap();
assert_eq!(Some(Ordering::Equal), compare_numbers(&n1, &n2));
// test one u64 and one f64
let n1 = Number::from_f64(1f64).unwrap();
let n2 = Number::from(2u64);
assert_eq!(Some(Ordering::Less), compare_numbers(&n1, &n2));
assert_eq!(Some(Ordering::Greater), compare_numbers(&n2, &n1));
// equality
let n1 = Number::from_f64(1f64).unwrap();
let n2 = Number::from(1u64);
assert_eq!(Some(Ordering::Equal), compare_numbers(&n1, &n2));
assert_eq!(Some(Ordering::Equal), compare_numbers(&n2, &n1));
// float is neg
let n1 = Number::from_f64(-1f64).unwrap();
let n2 = Number::from(1u64);
assert_eq!(Some(Ordering::Less), compare_numbers(&n1, &n2));
assert_eq!(Some(Ordering::Greater), compare_numbers(&n2, &n1));
// float is too big
let n1 = Number::from_f64(std::f64::MAX).unwrap();
let n2 = Number::from(1u64);
assert_eq!(Some(Ordering::Greater), compare_numbers(&n1, &n2));
assert_eq!(Some(Ordering::Less), compare_numbers(&n2, &n1));
// misc
let n1 = Number::from_f64(std::f64::MAX).unwrap();
let n2 = Number::from(std::u64::MAX);
assert_eq!(Some(Ordering::Greater), compare_numbers(&n1, &n2));
assert_eq!(Some(Ordering::Less), compare_numbers(&n2, &n1));
}
}


@ -0,0 +1,127 @@
mod parser;
mod condition;
pub(crate) use parser::Rule;
use std::ops::Not;
use condition::Condition;
use crate::error::Error;
use crate::{DocumentId, MainT, store::Index};
use heed::RoTxn;
use meilisearch_schema::Schema;
use parser::{PREC_CLIMBER, FilterParser};
use pest::iterators::{Pair, Pairs};
use pest::Parser;
type FilterResult<'a> = Result<Filter<'a>, Error>;
#[derive(Debug)]
pub enum Filter<'a> {
Condition(Condition<'a>),
Or(Box<Self>, Box<Self>),
And(Box<Self>, Box<Self>),
Not(Box<Self>),
}
impl<'a> Filter<'a> {
pub fn parse(expr: &'a str, schema: &'a Schema) -> FilterResult<'a> {
let mut lexed = FilterParser::parse(Rule::prgm, expr.as_ref())?;
Self::build(lexed.next().unwrap().into_inner(), schema)
}
pub fn test(
&self,
reader: &RoTxn<MainT>,
index: &Index,
document_id: DocumentId,
) -> Result<bool, Error> {
use Filter::*;
match self {
Condition(c) => c.test(reader, index, document_id),
Or(lhs, rhs) => Ok(
lhs.test(reader, index, document_id)? || rhs.test(reader, index, document_id)?
),
And(lhs, rhs) => Ok(
lhs.test(reader, index, document_id)? && rhs.test(reader, index, document_id)?
),
Not(op) => op.test(reader, index, document_id).map(bool::not),
}
}
fn build(expression: Pairs<'a, Rule>, schema: &'a Schema) -> FilterResult<'a> {
PREC_CLIMBER.climb(
expression,
|pair: Pair<Rule>| match pair.as_rule() {
Rule::eq => Ok(Filter::Condition(Condition::eq(pair, schema)?)),
Rule::greater => Ok(Filter::Condition(Condition::greater(pair, schema)?)),
Rule::less => Ok(Filter::Condition(Condition::less(pair, schema)?)),
Rule::neq => Ok(Filter::Condition(Condition::neq(pair, schema)?)),
Rule::geq => Ok(Filter::Condition(Condition::geq(pair, schema)?)),
Rule::leq => Ok(Filter::Condition(Condition::leq(pair, schema)?)),
Rule::prgm => Self::build(pair.into_inner(), schema),
Rule::term => Self::build(pair.into_inner(), schema),
Rule::not => Ok(Filter::Not(Box::new(Self::build(
pair.into_inner(),
schema,
)?))),
_ => unreachable!(),
},
|lhs: FilterResult, op: Pair<Rule>, rhs: FilterResult| match op.as_rule() {
Rule::or => Ok(Filter::Or(Box::new(lhs?), Box::new(rhs?))),
Rule::and => Ok(Filter::And(Box::new(lhs?), Box::new(rhs?))),
_ => unreachable!(),
},
)
}
}
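// Editor's sketch (illustration only; `schema`, `reader`, `index` and
// `doc_id` are assumed to exist and are hypothetical names):
//
//     let filter = Filter::parse("release_date >= 1576800000 AND NOT archived = true", &schema)?;
//     let keep = filter.test(&reader, &index, doc_id)?;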
#[cfg(test)]
mod test {
use super::*;
#[test]
fn invalid_syntax() {
assert!(FilterParser::parse(Rule::prgm, "field : id").is_err());
assert!(FilterParser::parse(Rule::prgm, "field=hello hello").is_err());
assert!(FilterParser::parse(Rule::prgm, "field=hello OR OR").is_err());
assert!(FilterParser::parse(Rule::prgm, "OR field:hello").is_err());
assert!(FilterParser::parse(Rule::prgm, r#"field="hello world"#).is_err());
assert!(FilterParser::parse(Rule::prgm, r#"field='hello world"#).is_err());
assert!(FilterParser::parse(Rule::prgm, "NOT field=").is_err());
assert!(FilterParser::parse(Rule::prgm, "N").is_err());
assert!(FilterParser::parse(Rule::prgm, "(field=1").is_err());
assert!(FilterParser::parse(Rule::prgm, "(field=1))").is_err());
assert!(FilterParser::parse(Rule::prgm, "field=1ORfield=2").is_err());
assert!(FilterParser::parse(Rule::prgm, "field=1 ( OR field=2)").is_err());
assert!(FilterParser::parse(Rule::prgm, "hello world=1").is_err());
assert!(FilterParser::parse(Rule::prgm, "").is_err());
assert!(FilterParser::parse(Rule::prgm, r#"((((((hello=world)))))"#).is_err());
}
#[test]
fn valid_syntax() {
assert!(FilterParser::parse(Rule::prgm, "field = id").is_ok());
assert!(FilterParser::parse(Rule::prgm, "field=id").is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field >= 10"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field <= 10"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field="hello world""#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field='hello world'"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field > 10"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field < 10"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field < 10 AND NOT field=5"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field < 10 AND NOT field > 7.5"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field=true OR NOT field=5"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"NOT field=true OR NOT field=5"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field='hello world' OR ( NOT field=true OR NOT field=5 )"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field='hello \'worl\'d' OR ( NOT field=true OR NOT field=5 )"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field="hello \"worl\"d" OR ( NOT field=true OR NOT field=5 )"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"((((((hello=world))))))"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#""foo bar" > 10"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#""foo bar" = 10"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"'foo bar' = 10"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"'foo bar' <= 10"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"'foo bar' != 10"#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"bar != 10"#).is_ok());
}
}


@@ -0,0 +1,28 @@
key = _{quoted | word}
value = _{quoted | word}
quoted = _{ (PUSH("'") | PUSH("\"")) ~ string ~ POP }
string = {char*}
word = ${(LETTER | NUMBER | "_" | "-" | ".")+}
char = _{ !(PEEK | "\\") ~ ANY
| "\\" ~ (PEEK | "\\" | "/" | "b" | "f" | "n" | "r" | "t")
| "\\" ~ ("u" ~ ASCII_HEX_DIGIT{4})}
condition = _{eq | greater | less | geq | leq | neq}
geq = {key ~ ">=" ~ value}
leq = {key ~ "<=" ~ value}
neq = {key ~ "!=" ~ value}
eq = {key ~ "=" ~ value}
greater = {key ~ ">" ~ value}
less = {key ~ "<" ~ value}
prgm = {SOI ~ expr ~ EOI}
expr = _{ ( term ~ (operation ~ term)* ) }
term = { ("(" ~ expr ~ ")") | condition | not }
operation = _{ and | or }
and = {"AND"}
or = {"OR"}
not = {"NOT" ~ term}
WHITESPACE = _{ " " }
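The PUSH/POP pair in the `quoted` rule records the opening quote on pest's stack and requires the very same character to close the string, which is what rejects the mixed-quote inputs in the invalid_syntax test above. A tiny sketch of the behaviour, assuming the FilterParser declared in the next file:
assert!(FilterParser::parse(Rule::prgm, r#"field="hello""#).is_ok());
assert!(FilterParser::parse(Rule::prgm, r#"field="hello'"#).is_err()); // quotes must match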


@@ -0,0 +1,12 @@
use once_cell::sync::Lazy;
use pest::prec_climber::{Operator, Assoc, PrecClimber};
pub static PREC_CLIMBER: Lazy<PrecClimber<Rule>> = Lazy::new(|| {
use Assoc::*;
use Rule::*;
pest::prec_climber::PrecClimber::new(vec![Operator::new(or, Left), Operator::new(and, Left)])
});
#[derive(Parser)]
#[grammar = "filters/parser/grammar.pest"]
pub struct FilterParser;
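Operators listed later in the PrecClimber vector bind tighter, so AND has higher precedence than OR and both are left-associative. A test-style sketch of the resulting tree shape, mirroring the climb in Filter::build but rendering strings; the `shape` helper is hypothetical and not part of the crate:
#[cfg(test)]
mod precedence_sketch {
    use super::*;
    use pest::iterators::{Pair, Pairs};
    use pest::Parser;
    // Hypothetical helper: same climb as Filter::build, but renders the
    // parse as a parenthesized string so the precedence is visible.
    fn shape(expr: Pairs<Rule>) -> String {
        PREC_CLIMBER.climb(
            expr,
            |pair: Pair<Rule>| match pair.as_rule() {
                Rule::prgm | Rule::term => shape(pair.into_inner()),
                _ => pair.as_str().trim().to_string(),
            },
            |lhs: String, op: Pair<Rule>, rhs: String| match op.as_rule() {
                Rule::or => format!("({} OR {})", lhs, rhs),
                Rule::and => format!("({} AND {})", lhs, rhs),
                _ => unreachable!(),
            },
        )
    }
    #[test]
    fn and_binds_tighter_than_or() {
        let mut pairs = FilterParser::parse(Rule::prgm, "a=1 OR b=2 AND c=3").unwrap();
        assert_eq!(shape(pairs.next().unwrap().into_inner()), "(a=1 OR (b=2 AND c=3))");
    }
}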


@@ -0,0 +1,134 @@
use std::cmp::min;
use std::collections::BTreeMap;
use std::ops::{Index, IndexMut};
// A simple wrapper around Vec so we get contiguous memory that we can index as if it were a 2D array.
struct N2Array<T> {
y_size: usize,
buf: Vec<T>,
}
impl<T: Clone> N2Array<T> {
fn new(x: usize, y: usize, value: T) -> N2Array<T> {
N2Array {
y_size: y,
buf: vec![value; x * y],
}
}
}
impl<T> Index<(usize, usize)> for N2Array<T> {
type Output = T;
#[inline]
fn index(&self, (x, y): (usize, usize)) -> &T {
&self.buf[(x * self.y_size) + y]
}
}
impl<T> IndexMut<(usize, usize)> for N2Array<T> {
#[inline]
fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut T {
&mut self.buf[(x * self.y_size) + y]
}
}
pub fn prefix_damerau_levenshtein(source: &[u8], target: &[u8]) -> (u32, usize) {
let (n, m) = (source.len(), target.len());
assert!(
n <= m,
"the source string must be shorter than the target one"
);
if n == 0 {
return (m as u32, 0);
}
if m == 0 {
return (n as u32, 0);
}
if n == m && source == target {
return (0, m);
}
let inf = n + m;
let mut matrix = N2Array::new(n + 2, m + 2, 0);
matrix[(0, 0)] = inf;
for i in 0..n + 1 {
matrix[(i + 1, 0)] = inf;
matrix[(i + 1, 1)] = i;
}
for j in 0..m + 1 {
matrix[(0, j + 1)] = inf;
matrix[(1, j + 1)] = j;
}
let mut last_row = BTreeMap::new();
for (row, char_s) in source.iter().enumerate() {
let mut last_match_col = 0;
let row = row + 1;
for (col, char_t) in target.iter().enumerate() {
let col = col + 1;
let last_match_row = *last_row.get(&char_t).unwrap_or(&0);
let cost = if char_s == char_t { 0 } else { 1 };
let dist_add = matrix[(row, col + 1)] + 1;
let dist_del = matrix[(row + 1, col)] + 1;
let dist_sub = matrix[(row, col)] + cost;
let dist_trans = matrix[(last_match_row, last_match_col)]
+ (row - last_match_row - 1)
+ 1
+ (col - last_match_col - 1);
let dist = min(min(dist_add, dist_del), min(dist_sub, dist_trans));
matrix[(row + 1, col + 1)] = dist;
if cost == 0 {
last_match_col = col;
}
}
last_row.insert(char_s, row);
}
let mut minimum = (u32::max_value(), 0);
for x in n..=m {
let dist = matrix[(n + 1, x + 1)] as u32;
if dist < minimum.0 {
minimum = (dist, x)
}
}
minimum
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn matched_length() {
let query = "Levenste";
let text = "Levenshtein";
let (dist, length) = prefix_damerau_levenshtein(query.as_bytes(), text.as_bytes());
assert_eq!(dist, 1);
assert_eq!(&text[..length], "Levenshte");
}
#[test]
#[should_panic]
fn matched_length_panic() {
let query = "Levenshtein";
let text = "Levenste";
// this function will panic if the source is longer than the target
prefix_damerau_levenshtein(query.as_bytes(), text.as_bytes());
}
}

meilisearch-core/src/lib.rs (new file, 195 lines)

@@ -0,0 +1,195 @@
#[cfg(test)]
#[macro_use]
extern crate assert_matches;
#[macro_use]
extern crate pest_derive;
mod automaton;
mod bucket_sort;
mod database;
mod distinct_map;
mod error;
mod filters;
mod levenshtein;
mod number;
mod query_builder;
mod query_tree;
mod query_words_mapper;
mod ranked_map;
mod raw_document;
mod reordered_attrs;
mod update;
pub mod settings;
pub mod criterion;
pub mod raw_indexer;
pub mod serde;
pub mod store;
pub use self::database::{BoxUpdateFn, Database, MainT, UpdateT};
pub use self::error::{Error, HeedError, FstError, MResult, pest_error};
pub use self::filters::Filter;
pub use self::number::{Number, ParseNumberError};
pub use self::ranked_map::RankedMap;
pub use self::raw_document::RawDocument;
pub use self::store::Index;
pub use self::update::{EnqueuedUpdateResult, ProcessedUpdateResult, UpdateStatus, UpdateType};
pub use meilisearch_types::{DocIndex, DocumentId, Highlight};
pub use meilisearch_schema::Schema;
pub use query_words_mapper::QueryWordsMapper;
use std::convert::TryFrom;
use std::collections::HashMap;
use compact_arena::SmallArena;
use log::{error, trace};
use crate::bucket_sort::PostingsListView;
use crate::levenshtein::prefix_damerau_levenshtein;
use crate::query_tree::{QueryId, QueryKind};
use crate::reordered_attrs::ReorderedAttrs;
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Document {
pub id: DocumentId,
pub highlights: Vec<Highlight>,
#[cfg(test)]
pub matches: Vec<crate::bucket_sort::SimpleMatch>,
}
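// Builds the highlight list of a document. For tolerant and non-tolerant
// queries the covered area is the part of the indexed word that actually
// matches the query, computed with prefix_damerau_levenshtein; other kinds
// keep the full char_length of the match.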
fn highlights_from_raw_document<'a, 'tag, 'txn>(
raw_document: &RawDocument<'a, 'tag>,
queries_kinds: &HashMap<QueryId, &QueryKind>,
arena: &SmallArena<'tag, PostingsListView<'txn>>,
searchable_attrs: Option<&ReorderedAttrs>,
schema: &Schema,
) -> Vec<Highlight>
{
let mut highlights = Vec::new();
for bm in raw_document.bare_matches.iter() {
let postings_list = &arena[bm.postings_list];
let input = postings_list.input();
let kind = &queries_kinds.get(&bm.query_index);
for di in postings_list.iter() {
let covered_area = match kind {
Some(QueryKind::NonTolerant(query)) | Some(QueryKind::Tolerant(query)) => {
let len = if query.len() > input.len() {
input.len()
} else {
prefix_damerau_levenshtein(query.as_bytes(), input).1
};
u16::try_from(len).unwrap_or(u16::max_value())
},
_ => di.char_length,
};
let attribute = searchable_attrs
.and_then(|sa| sa.reverse(di.attribute))
.unwrap_or(di.attribute);
let attribute = match schema.indexed_pos_to_field_id(attribute) {
Some(field_id) => field_id.0,
None => {
error!("Cannot convert indexed_pos {} to field_id", attribute);
trace!("Schema is compromized; {:?}", schema);
continue
}
};
let highlight = Highlight {
attribute,
char_index: di.char_index,
char_length: covered_area,
};
highlights.push(highlight);
}
}
highlights
}
impl Document {
#[cfg(not(test))]
pub fn from_highlights(id: DocumentId, highlights: &[Highlight]) -> Document {
Document { id, highlights: highlights.to_owned() }
}
#[cfg(test)]
pub fn from_highlights(id: DocumentId, highlights: &[Highlight]) -> Document {
Document { id, highlights: highlights.to_owned(), matches: Vec::new() }
}
#[cfg(not(test))]
pub fn from_raw<'a, 'tag, 'txn>(
raw_document: RawDocument<'a, 'tag>,
queries_kinds: &HashMap<QueryId, &QueryKind>,
arena: &SmallArena<'tag, PostingsListView<'txn>>,
searchable_attrs: Option<&ReorderedAttrs>,
schema: &Schema,
) -> Document
{
let highlights = highlights_from_raw_document(
&raw_document,
queries_kinds,
arena,
searchable_attrs,
schema,
);
Document { id: raw_document.id, highlights }
}
#[cfg(test)]
pub fn from_raw<'a, 'tag, 'txn>(
raw_document: RawDocument<'a, 'tag>,
queries_kinds: &HashMap<QueryId, &QueryKind>,
arena: &SmallArena<'tag, PostingsListView<'txn>>,
searchable_attrs: Option<&ReorderedAttrs>,
schema: &Schema,
) -> Document
{
use crate::bucket_sort::SimpleMatch;
let highlights = highlights_from_raw_document(
&raw_document,
queries_kinds,
arena,
searchable_attrs,
schema,
);
let mut matches = Vec::new();
for sm in raw_document.processed_matches {
let attribute = searchable_attrs
.and_then(|sa| sa.reverse(sm.attribute))
.unwrap_or(sm.attribute);
let attribute = match schema.indexed_pos_to_field_id(attribute) {
Some(field_id) => field_id.0,
None => {
error!("Cannot convert indexed_pos {} to field_id", attribute);
trace!("Schema is compromized; {:?}", schema);
continue
}
};
matches.push(SimpleMatch { attribute, ..sm });
}
matches.sort_unstable();
Document { id: raw_document.id, highlights, matches }
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::mem;
#[test]
fn docindex_mem_size() {
assert_eq!(mem::size_of::<DocIndex>(), 16);
}
}


@@ -0,0 +1,120 @@
use std::cmp::Ordering;
use std::fmt;
use std::num::{ParseFloatError, ParseIntError};
use std::str::FromStr;
use ordered_float::OrderedFloat;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Copy, Clone, Hash)]
pub enum Number {
Unsigned(u64),
Signed(i64),
Float(OrderedFloat<f64>),
Null,
}
impl Default for Number {
fn default() -> Self {
Self::Null
}
}
impl FromStr for Number {
type Err = ParseNumberError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let uint_error = match u64::from_str(s) {
Ok(unsigned) => return Ok(Number::Unsigned(unsigned)),
Err(error) => error,
};
let int_error = match i64::from_str(s) {
Ok(signed) => return Ok(Number::Signed(signed)),
Err(error) => error,
};
let float_error = match f64::from_str(s) {
Ok(float) => return Ok(Number::Float(OrderedFloat(float))),
Err(error) => error,
};
Err(ParseNumberError {
uint_error,
int_error,
float_error,
})
}
}
impl PartialEq for Number {
fn eq(&self, other: &Number) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for Number {}
impl PartialOrd for Number {
fn partial_cmp(&self, other: &Number) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Number {
fn cmp(&self, other: &Self) -> Ordering {
use Number::{Float, Signed, Unsigned, Null};
match (*self, *other) {
(Unsigned(a), Unsigned(b)) => a.cmp(&b),
(Unsigned(a), Signed(b)) => {
if b < 0 {
Ordering::Greater
} else {
a.cmp(&(b as u64))
}
}
(Unsigned(a), Float(b)) => (OrderedFloat(a as f64)).cmp(&b),
(Signed(a), Unsigned(b)) => {
if a < 0 {
Ordering::Less
} else {
(a as u64).cmp(&b)
}
}
(Signed(a), Signed(b)) => a.cmp(&b),
(Signed(a), Float(b)) => OrderedFloat(a as f64).cmp(&b),
(Float(a), Unsigned(b)) => a.cmp(&OrderedFloat(b as f64)),
(Float(a), Signed(b)) => a.cmp(&OrderedFloat(b as f64)),
(Float(a), Float(b)) => a.cmp(&b),
(Null, Null) => Ordering::Equal,
(_, Null) => Ordering::Less,
(Null, _) => Ordering::Greater,
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParseNumberError {
uint_error: ParseIntError,
int_error: ParseIntError,
float_error: ParseFloatError,
}
impl fmt::Display for ParseNumberError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.uint_error == self.int_error {
write!(
f,
"can not parse number: {}, {}",
self.uint_error, self.float_error
)
} else {
write!(
f,
"can not parse number: {}, {}, {}",
self.uint_error, self.int_error, self.float_error
)
}
}
}
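A short test-style sketch of the ordering above, assuming only the Number type from this file: from_str tries unsigned, then signed, then float; mixed-type comparisons go through OrderedFloat; and Null sorts after every number.
#[test]
fn cross_type_ordering_sketch() {
    let big = Number::from_str("10").unwrap(); // Unsigned(10)
    let neg = Number::from_str("-3").unwrap(); // Signed(-3)
    let mid = Number::from_str("2.5").unwrap(); // Float(2.5)
    assert!(neg < mid && mid < big);
    assert!(big < Number::Null); // (_, Null) => Ordering::Less
}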

File diff suppressed because it is too large


@@ -0,0 +1,560 @@
use std::borrow::Cow;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::ops::Range;
use std::time::Instant;
use std::{cmp, fmt, iter::once};
use fst::{IntoStreamer, Streamer};
use itertools::{EitherOrBoth, merge_join_by};
use meilisearch_tokenizer::split_query_string;
use sdset::{Set, SetBuf, SetOperation};
use log::debug;
use crate::database::MainT;
use crate::{store, DocumentId, DocIndex, MResult};
use crate::automaton::{normalize_str, build_dfa, build_prefix_dfa, build_exact_dfa};
use crate::QueryWordsMapper;
#[derive(Clone, PartialEq, Eq, Hash)]
pub enum Operation {
And(Vec<Operation>),
Or(Vec<Operation>),
Query(Query),
}
impl fmt::Debug for Operation {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn pprint_tree(f: &mut fmt::Formatter<'_>, op: &Operation, depth: usize) -> fmt::Result {
match op {
Operation::And(children) => {
writeln!(f, "{:1$}AND", "", depth * 2)?;
children.iter().try_for_each(|c| pprint_tree(f, c, depth + 1))
},
Operation::Or(children) => {
writeln!(f, "{:1$}OR", "", depth * 2)?;
children.iter().try_for_each(|c| pprint_tree(f, c, depth + 1))
},
Operation::Query(query) => writeln!(f, "{:2$}{:?}", "", query, depth * 2),
}
}
pprint_tree(f, self, 0)
}
}
impl Operation {
fn tolerant(id: QueryId, prefix: bool, s: &str) -> Operation {
Operation::Query(Query { id, prefix, exact: true, kind: QueryKind::Tolerant(s.to_string()) })
}
fn non_tolerant(id: QueryId, prefix: bool, s: &str) -> Operation {
Operation::Query(Query { id, prefix, exact: true, kind: QueryKind::NonTolerant(s.to_string()) })
}
fn phrase2(id: QueryId, prefix: bool, (left, right): (&str, &str)) -> Operation {
let kind = QueryKind::Phrase(vec![left.to_owned(), right.to_owned()]);
Operation::Query(Query { id, prefix, exact: true, kind })
}
}
pub type QueryId = usize;
#[derive(Clone, Eq)]
pub struct Query {
pub id: QueryId,
pub prefix: bool,
pub exact: bool,
pub kind: QueryKind,
}
impl PartialEq for Query {
fn eq(&self, other: &Self) -> bool {
self.prefix == other.prefix && self.kind == other.kind
}
}
impl Hash for Query {
fn hash<H: Hasher>(&self, state: &mut H) {
self.prefix.hash(state);
self.kind.hash(state);
}
}
#[derive(Clone, PartialEq, Eq, Hash)]
pub enum QueryKind {
Tolerant(String),
NonTolerant(String),
Phrase(Vec<String>),
}
impl fmt::Debug for Query {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Query { id, prefix, kind, .. } = self;
let prefix = if *prefix { String::from("Prefix") } else { String::default() };
match kind {
QueryKind::NonTolerant(word) => {
f.debug_struct(&(prefix + "NonTolerant")).field("id", &id).field("word", &word).finish()
},
QueryKind::Tolerant(word) => {
f.debug_struct(&(prefix + "Tolerant")).field("id", &id).field("word", &word).finish()
},
QueryKind::Phrase(words) => {
f.debug_struct(&(prefix + "Phrase")).field("id", &id).field("words", &words).finish()
},
}
}
}
#[derive(Debug, Default)]
pub struct PostingsList {
docids: SetBuf<DocumentId>,
matches: SetBuf<DocIndex>,
}
pub struct Context {
pub words_set: fst::Set,
pub stop_words: fst::Set,
pub synonyms: store::Synonyms,
pub postings_lists: store::PostingsLists,
pub prefix_postings_lists: store::PrefixPostingsListsCache,
}
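// Tries every split of `word` in two halves and keeps the one whose least
// frequent half appears in the most documents, so a glued word can match
// two indexed words. Returns Ok(None) when no split gives two known words.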
fn split_best_frequency<'a>(reader: &heed::RoTxn<MainT>, ctx: &Context, word: &'a str) -> MResult<Option<(&'a str, &'a str)>> {
let chars = word.char_indices().skip(1);
let mut best = None;
for (i, _) in chars {
let (left, right) = word.split_at(i);
let left_freq = ctx.postings_lists
.postings_list(reader, left.as_bytes())?
.map(|p| p.docids.len())
.unwrap_or(0);
let right_freq = ctx.postings_lists
.postings_list(reader, right.as_bytes())?
.map(|p| p.docids.len())
.unwrap_or(0);
let min_freq = cmp::min(left_freq, right_freq);
if min_freq != 0 && best.map_or(true, |(old, _, _)| min_freq > old) {
best = Some((min_freq, left, right));
}
}
Ok(best.map(|(_, l, r)| (l, r)))
}
fn fetch_synonyms(reader: &heed::RoTxn<MainT>, ctx: &Context, words: &[&str]) -> MResult<Vec<Vec<String>>> {
let words = normalize_str(&words.join(" "));
let set = ctx.synonyms.synonyms(reader, words.as_bytes())?.unwrap_or_default();
let mut strings = Vec::new();
let mut stream = set.stream();
while let Some(input) = stream.next() {
if let Ok(input) = std::str::from_utf8(input) {
let alts = input.split_ascii_whitespace().map(ToOwned::to_owned).collect();
strings.push(alts);
}
}
Ok(strings)
}
fn create_operation<I, F>(iter: I, f: F) -> Operation
where I: IntoIterator<Item=Operation>,
F: Fn(Vec<Operation>) -> Operation,
{
let mut iter = iter.into_iter();
match (iter.next(), iter.next()) {
(Some(first), None) => first,
(first, second) => f(first.into_iter().chain(second).chain(iter).collect()),
}
}
const MAX_NGRAM: usize = 3;
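// Builds the query tree: words are grouped in ngrams of up to MAX_NGRAM
// words, every group is OR-ed with its alternatives (synonyms, a split of a
// single word in two, the concatenation of a multi-word group) and AND-ed
// with the tree built from the remaining words.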
pub fn create_query_tree(
reader: &heed::RoTxn<MainT>,
ctx: &Context,
query: &str,
) -> MResult<(Operation, HashMap<QueryId, Range<usize>>)>
{
let words = split_query_string(query).map(str::to_lowercase);
let words = words.filter(|w| !ctx.stop_words.contains(w));
let words: Vec<_> = words.enumerate().collect();
let mut mapper = QueryWordsMapper::new(words.iter().map(|(_, w)| w));
fn create_inner(
reader: &heed::RoTxn<MainT>,
ctx: &Context,
mapper: &mut QueryWordsMapper,
words: &[(usize, String)],
) -> MResult<Vec<Operation>>
{
let mut alts = Vec::new();
for ngram in 1..=MAX_NGRAM {
if let Some(group) = words.get(..ngram) {
let mut group_ops = Vec::new();
let tail = &words[ngram..];
let is_last = tail.is_empty();
let mut group_alts = Vec::new();
match group {
[(id, word)] => {
let mut idgen = ((id + 1) * 100)..;
let range = (*id)..id+1;
let phrase = split_best_frequency(reader, ctx, word)?
.map(|ws| {
let id = idgen.next().unwrap();
idgen.next().unwrap();
mapper.declare(range.clone(), id, &[ws.0, ws.1]);
Operation::phrase2(id, is_last, ws)
});
let synonyms = fetch_synonyms(reader, ctx, &[word])?
.into_iter()
.map(|alts| {
let exact = alts.len() == 1;
let id = idgen.next().unwrap();
mapper.declare(range.clone(), id, &alts);
let mut idgen = once(id).chain(&mut idgen);
let iter = alts.into_iter().map(|w| {
let id = idgen.next().unwrap();
let kind = QueryKind::NonTolerant(w);
Operation::Query(Query { id, prefix: false, exact, kind })
});
create_operation(iter, Operation::And)
});
let original = Operation::tolerant(*id, is_last, word);
group_alts.push(original);
group_alts.extend(synonyms.chain(phrase));
},
words => {
let id = words[0].0;
let mut idgen = ((id + 1) * 100_usize.pow(ngram as u32))..;
let range = id..id+ngram;
let words: Vec<_> = words.iter().map(|(_, s)| s.as_str()).collect();
for synonym in fetch_synonyms(reader, ctx, &words)? {
let exact = synonym.len() == 1;
let id = idgen.next().unwrap();
mapper.declare(range.clone(), id, &synonym);
let mut idgen = once(id).chain(&mut idgen);
let synonym = synonym.into_iter().map(|s| {
let id = idgen.next().unwrap();
let kind = QueryKind::NonTolerant(s);
Operation::Query(Query { id, prefix: false, exact, kind })
});
group_alts.push(create_operation(synonym, Operation::And));
}
let id = idgen.next().unwrap();
let concat = words.concat();
mapper.declare(range.clone(), id, &[&concat]);
group_alts.push(Operation::non_tolerant(id, is_last, &concat));
}
}
group_ops.push(create_operation(group_alts, Operation::Or));
if !tail.is_empty() {
let tail_ops = create_inner(reader, ctx, mapper, tail)?;
group_ops.push(create_operation(tail_ops, Operation::Or));
}
alts.push(create_operation(group_ops, Operation::And));
}
}
Ok(alts)
}
let alternatives = create_inner(reader, ctx, &mut mapper, &words)?;
let operation = Operation::Or(alternatives);
let mapping = mapper.mapping();
Ok((operation, mapping))
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct PostingsKey<'o> {
pub query: &'o Query,
pub input: Vec<u8>,
pub distance: u8,
pub is_exact: bool,
}
pub type Postings<'o, 'txn> = HashMap<PostingsKey<'o>, Cow<'txn, Set<DocIndex>>>;
pub type Cache<'o, 'txn> = HashMap<&'o Operation, Cow<'txn, Set<DocumentId>>>;
pub struct QueryResult<'o, 'txn> {
pub docids: Cow<'txn, Set<DocumentId>>,
pub queries: Postings<'o, 'txn>,
}
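// Walks the operation tree, loading the postings lists from the store and
// combining document ids with set intersections (AND) and unions (OR);
// identical sub-operations are computed only once thanks to the cache.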
pub fn traverse_query_tree<'o, 'txn>(
reader: &'txn heed::RoTxn<MainT>,
ctx: &Context,
tree: &'o Operation,
) -> MResult<QueryResult<'o, 'txn>>
{
fn execute_and<'o, 'txn>(
reader: &'txn heed::RoTxn<MainT>,
ctx: &Context,
cache: &mut Cache<'o, 'txn>,
postings: &mut Postings<'o, 'txn>,
depth: usize,
operations: &'o [Operation],
) -> MResult<Cow<'txn, Set<DocumentId>>>
{
debug!("{:1$}AND", "", depth * 2);
let before = Instant::now();
let mut results = Vec::new();
for op in operations {
if cache.get(op).is_none() {
let docids = match op {
Operation::And(ops) => execute_and(reader, ctx, cache, postings, depth + 1, &ops)?,
Operation::Or(ops) => execute_or(reader, ctx, cache, postings, depth + 1, &ops)?,
Operation::Query(query) => execute_query(reader, ctx, postings, depth + 1, &query)?,
};
cache.insert(op, docids);
}
}
for op in operations {
if let Some(docids) = cache.get(op) {
results.push(docids.as_ref());
}
}
let op = sdset::multi::Intersection::new(results);
let docids = op.into_set_buf();
debug!("{:3$}--- AND fetched {} documents in {:.02?}", "", docids.len(), before.elapsed(), depth * 2);
Ok(Cow::Owned(docids))
}
fn execute_or<'o, 'txn>(
reader: &'txn heed::RoTxn<MainT>,
ctx: &Context,
cache: &mut Cache<'o, 'txn>,
postings: &mut Postings<'o, 'txn>,
depth: usize,
operations: &'o [Operation],
) -> MResult<Cow<'txn, Set<DocumentId>>>
{
debug!("{:1$}OR", "", depth * 2);
let before = Instant::now();
let mut results = Vec::new();
for op in operations {
if cache.get(op).is_none() {
let docids = match op {
Operation::And(ops) => execute_and(reader, ctx, cache, postings, depth + 1, &ops)?,
Operation::Or(ops) => execute_or(reader, ctx, cache, postings, depth + 1, &ops)?,
Operation::Query(query) => execute_query(reader, ctx, postings, depth + 1, &query)?,
};
cache.insert(op, docids);
}
}
for op in operations {
if let Some(docids) = cache.get(op) {
results.push(docids.as_ref());
}
}
let op = sdset::multi::Union::new(results);
let docids = op.into_set_buf();
debug!("{:3$}--- OR fetched {} documents in {:.02?}", "", docids.len(), before.elapsed(), depth * 2);
Ok(Cow::Owned(docids))
}
fn execute_query<'o, 'txn>(
reader: &'txn heed::RoTxn<MainT>,
ctx: &Context,
postings: &mut Postings<'o, 'txn>,
depth: usize,
query: &'o Query,
) -> MResult<Cow<'txn, Set<DocumentId>>>
{
let before = Instant::now();
let Query { prefix, kind, exact, .. } = query;
let docids: Cow<Set<_>> = match kind {
QueryKind::Tolerant(word) => {
if *prefix && word.len() <= 2 {
let prefix = {
let mut array = [0; 4];
let bytes = word.as_bytes();
array[..bytes.len()].copy_from_slice(bytes);
array
};
// We retrieve the cached postings lists for all
// the words that start with this short prefix.
let result = ctx.prefix_postings_lists.prefix_postings_list(reader, prefix)?.unwrap_or_default();
let key = PostingsKey { query, input: word.clone().into_bytes(), distance: 0, is_exact: false };
postings.insert(key, result.matches);
let prefix_docids = &result.docids;
// We retrieve the exact postings list for the prefix,
// because we must consider these matches as exact.
let result = ctx.postings_lists.postings_list(reader, word.as_bytes())?.unwrap_or_default();
let key = PostingsKey { query, input: word.clone().into_bytes(), distance: 0, is_exact: true };
postings.insert(key, result.matches);
let exact_docids = &result.docids;
let before = Instant::now();
let docids = sdset::duo::Union::new(prefix_docids, exact_docids).into_set_buf();
debug!("{:4$}prefix docids ({} and {}) construction took {:.02?}",
"", prefix_docids.len(), exact_docids.len(), before.elapsed(), depth * 2);
Cow::Owned(docids)
} else {
let dfa = if *prefix { build_prefix_dfa(word) } else { build_dfa(word) };
let byte = word.as_bytes()[0];
let mut stream = if byte == u8::max_value() {
ctx.words_set.search(&dfa).ge(&[byte]).into_stream()
} else {
ctx.words_set.search(&dfa).ge(&[byte]).lt(&[byte + 1]).into_stream()
};
let before = Instant::now();
let mut results = Vec::new();
while let Some(input) = stream.next() {
if let Some(result) = ctx.postings_lists.postings_list(reader, input)? {
let distance = dfa.eval(input).to_u8();
let is_exact = *exact && distance == 0 && input.len() == word.len();
results.push(result.docids);
let key = PostingsKey { query, input: input.to_owned(), distance, is_exact };
postings.insert(key, result.matches);
}
}
debug!("{:3$}docids retrieval ({:?}) took {:.02?}", "", results.len(), before.elapsed(), depth * 2);
let before = Instant::now();
let docids = if results.len() > 10 {
let cap = results.iter().map(|dis| dis.len()).sum();
let mut docids = Vec::with_capacity(cap);
for dis in results {
docids.extend_from_slice(&dis);
}
SetBuf::from_dirty(docids)
} else {
let sets = results.iter().map(AsRef::as_ref).collect();
sdset::multi::Union::new(sets).into_set_buf()
};
debug!("{:2$}docids construction took {:.02?}", "", before.elapsed(), depth * 2);
Cow::Owned(docids)
}
},
QueryKind::NonTolerant(word) => {
// TODO support prefix and non-prefix exact DFA
let dfa = build_exact_dfa(word);
let byte = word.as_bytes()[0];
let mut stream = if byte == u8::max_value() {
ctx.words_set.search(&dfa).ge(&[byte]).into_stream()
} else {
ctx.words_set.search(&dfa).ge(&[byte]).lt(&[byte + 1]).into_stream()
};
let before = Instant::now();
let mut results = Vec::new();
while let Some(input) = stream.next() {
if let Some(result) = ctx.postings_lists.postings_list(reader, input)? {
let distance = dfa.eval(input).to_u8();
results.push(result.docids);
let key = PostingsKey { query, input: input.to_owned(), distance, is_exact: *exact };
postings.insert(key, result.matches);
}
}
debug!("{:3$}docids retrieval ({:?}) took {:.02?}", "", results.len(), before.elapsed(), depth * 2);
let before = Instant::now();
let docids = if results.len() > 10 {
let cap = results.iter().map(|dis| dis.len()).sum();
let mut docids = Vec::with_capacity(cap);
for dis in results {
docids.extend_from_slice(&dis);
}
SetBuf::from_dirty(docids)
} else {
let sets = results.iter().map(AsRef::as_ref).collect();
sdset::multi::Union::new(sets).into_set_buf()
};
debug!("{:2$}docids construction took {:.02?}", "", before.elapsed(), depth * 2);
Cow::Owned(docids)
},
QueryKind::Phrase(words) => {
// TODO support prefix and non-prefix exact DFA
if let [first, second] = words.as_slice() {
let first = ctx.postings_lists.postings_list(reader, first.as_bytes())?.unwrap_or_default();
let second = ctx.postings_lists.postings_list(reader, second.as_bytes())?.unwrap_or_default();
let iter = merge_join_by(first.matches.as_slice(), second.matches.as_slice(), |a, b| {
let x = (a.document_id, a.attribute, (a.word_index as u32) + 1);
let y = (b.document_id, b.attribute, b.word_index as u32);
x.cmp(&y)
});
let matches: Vec<_> = iter
.filter_map(EitherOrBoth::both)
.flat_map(|(a, b)| once(*a).chain(Some(*b)))
.collect();
let before = Instant::now();
let mut docids: Vec<_> = matches.iter().map(|m| m.document_id).collect();
docids.dedup();
let docids = SetBuf::new(docids).unwrap();
debug!("{:2$}docids construction took {:.02?}", "", before.elapsed(), depth * 2);
let matches = Cow::Owned(SetBuf::from_dirty(matches));
let key = PostingsKey { query, input: vec![], distance: 0, is_exact: true };
postings.insert(key, matches);
Cow::Owned(docids)
} else {
debug!("{:2$}{:?} skipped", "", words, depth * 2);
Cow::default()
}
},
};
debug!("{:4$}{:?} fetched {:?} documents in {:.02?}", "", query, docids.len(), before.elapsed(), depth * 2);
Ok(docids)
}
let mut cache = Cache::new();
let mut postings = Postings::new();
let docids = match tree {
Operation::And(ops) => execute_and(reader, ctx, &mut cache, &mut postings, 0, &ops)?,
Operation::Or(ops) => execute_or(reader, ctx, &mut cache, &mut postings, 0, &ops)?,
Operation::Query(query) => execute_query(reader, ctx, &mut postings, 0, &query)?,
};
Ok(QueryResult { docids, queries: postings })
}


@@ -0,0 +1,415 @@
use std::collections::HashMap;
use std::iter::FromIterator;
use std::ops::Range;
use intervaltree::{Element, IntervalTree};
pub type QueryId = usize;
pub struct QueryWordsMapper {
originals: Vec<String>,
mappings: HashMap<QueryId, (Range<usize>, Vec<String>)>,
}
impl QueryWordsMapper {
pub fn new<I, A>(originals: I) -> QueryWordsMapper
where I: IntoIterator<Item = A>,
A: ToString,
{
let originals = originals.into_iter().map(|s| s.to_string()).collect();
QueryWordsMapper { originals, mappings: HashMap::new() }
}
pub fn declare<I, A>(&mut self, range: Range<usize>, id: QueryId, replacement: I)
where I: IntoIterator<Item = A>,
A: ToString,
{
assert!(range.len() != 0);
assert!(self.originals.get(range.clone()).is_some());
assert!(id >= self.originals.len());
let replacement: Vec<_> = replacement.into_iter().map(|s| s.to_string()).collect();
assert!(!replacement.is_empty());
// We detect words at the end and at the front of the
// replacement that are common with the originals:
//
// x a b c d e f g
// ^^^/ \^^^
// a b x c d k j e f
// ^^^ ^^^
//
let left = &self.originals[..range.start];
let right = &self.originals[range.end..];
let common_left = longest_common_prefix(left, &replacement);
let common_right = longest_common_prefix(&replacement, right);
for i in 0..common_left {
let range = range.start - common_left + i..range.start - common_left + i + 1;
let replacement = vec![replacement[i].clone()];
self.mappings.insert(id + i, (range, replacement));
}
{
let replacement = replacement[common_left..replacement.len() - common_right].iter().cloned().collect();
self.mappings.insert(id + common_left, (range.clone(), replacement));
}
for i in 0..common_right {
let id = id + replacement.len() - common_right + i;
let range = range.end + i..range.end + i + 1;
let replacement = vec![replacement[replacement.len() - common_right + i].clone()];
self.mappings.insert(id, (range, replacement));
}
}
pub fn mapping(self) -> HashMap<QueryId, Range<usize>> {
let mappings = self.mappings.into_iter().map(|(i, (r, v))| (r, (i, v)));
let intervals = IntervalTree::from_iter(mappings);
let mut output = HashMap::new();
let mut offset = 0;
// We map each original word to the biggest number of
// associated words.
for i in 0..self.originals.len() {
let max = intervals.query_point(i)
.filter_map(|e| {
if e.range.end - 1 == i {
let len = e.value.1.iter().skip(i - e.range.start).count();
if len != 0 { Some(len) } else { None }
} else { None }
})
.max()
.unwrap_or(1);
let range = i + offset..i + offset + max;
output.insert(i, range);
offset += max - 1;
}
// We retrieve the range that each original word
// is mapped to and apply it to each of the words.
for i in 0..self.originals.len() {
let iter = intervals.query_point(i).filter(|e| e.range.end - 1 == i);
for Element { range, value: (id, words) } in iter {
// We ask for the complete range mapped to the area we map.
let start = output.get(&range.start).map(|r| r.start).unwrap_or(range.start);
let end = output.get(&(range.end - 1)).map(|r| r.end).unwrap_or(range.end);
let range = start..end;
// We map each query id to one word, except the last one,
// which is mapped to all the remaining words.
let add = range.len() - words.len();
for (j, x) in range.take(words.len()).enumerate() {
let add = if j == words.len() - 1 { add } else { 0 }; // is last?
let range = x..x + 1 + add;
output.insert(id + j, range);
}
}
}
output
}
}
fn longest_common_prefix<T: Eq + std::fmt::Debug>(a: &[T], b: &[T]) -> usize {
let mut best = None;
for i in (0..a.len()).rev() {
let count = a[i..].iter().zip(b).take_while(|(a, b)| a == b).count();
best = match best {
Some(old) if count > old => Some(count),
Some(_) => break,
None => Some(count),
};
}
best.unwrap_or(0)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn original_unmodified() {
let query = ["new", "york", "city", "subway"];
// 0 1 2 3
let mut builder = QueryWordsMapper::new(&query);
// new york = new york city
builder.declare(0..2, 4, &["new", "york", "city"]);
// ^ 4 5 6
// new = new york city
builder.declare(0..1, 7, &["new", "york", "city"]);
// ^ 7 8 9
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..1); // new
assert_eq!(mapping[&1], 1..2); // york
assert_eq!(mapping[&2], 2..3); // city
assert_eq!(mapping[&3], 3..4); // subway
assert_eq!(mapping[&4], 0..1); // new
assert_eq!(mapping[&5], 1..2); // york
assert_eq!(mapping[&6], 2..3); // city
assert_eq!(mapping[&7], 0..1); // new
assert_eq!(mapping[&8], 1..2); // york
assert_eq!(mapping[&9], 2..3); // city
}
#[test]
fn original_unmodified2() {
let query = ["new", "york", "city", "subway"];
// 0 1 2 3
let mut builder = QueryWordsMapper::new(&query);
// city subway = new york city underground train
builder.declare(2..4, 4, &["new", "york", "city", "underground", "train"]);
// ^ 4 5 6 7 8
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..1); // new
assert_eq!(mapping[&1], 1..2); // york
assert_eq!(mapping[&2], 2..3); // city
assert_eq!(mapping[&3], 3..5); // subway
assert_eq!(mapping[&4], 0..1); // new
assert_eq!(mapping[&5], 1..2); // york
assert_eq!(mapping[&6], 2..3); // city
assert_eq!(mapping[&7], 3..4); // underground
assert_eq!(mapping[&8], 4..5); // train
}
#[test]
fn original_unmodified3() {
let query = ["a", "b", "x", "x", "a", "b", "c", "d", "e", "f", "g"];
// 0 1 2 3 4 5 6 7 8 9 10
let mut builder = QueryWordsMapper::new(&query);
// c d = a b x c d k j e f
builder.declare(6..8, 11, &["a", "b", "x", "c", "d", "k", "j", "e", "f"]);
// ^^ 11 12 13 14 15 16 17 18 19
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..1); // a
assert_eq!(mapping[&1], 1..2); // b
assert_eq!(mapping[&2], 2..3); // x
assert_eq!(mapping[&3], 3..4); // x
assert_eq!(mapping[&4], 4..5); // a
assert_eq!(mapping[&5], 5..6); // b
assert_eq!(mapping[&6], 6..7); // c
assert_eq!(mapping[&7], 7..11); // d
assert_eq!(mapping[&8], 11..12); // e
assert_eq!(mapping[&9], 12..13); // f
assert_eq!(mapping[&10], 13..14); // g
assert_eq!(mapping[&11], 4..5); // a
assert_eq!(mapping[&12], 5..6); // b
assert_eq!(mapping[&13], 6..7); // x
assert_eq!(mapping[&14], 7..8); // c
assert_eq!(mapping[&15], 8..9); // d
assert_eq!(mapping[&16], 9..10); // k
assert_eq!(mapping[&17], 10..11); // j
assert_eq!(mapping[&18], 11..12); // e
assert_eq!(mapping[&19], 12..13); // f
}
#[test]
fn simple_growing() {
let query = ["new", "york", "subway"];
// 0 1 2
let mut builder = QueryWordsMapper::new(&query);
// new york = new york city
builder.declare(0..2, 3, &["new", "york", "city"]);
// ^ 3 4 5
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..1); // new
assert_eq!(mapping[&1], 1..3); // york
assert_eq!(mapping[&2], 3..4); // subway
assert_eq!(mapping[&3], 0..1); // new
assert_eq!(mapping[&4], 1..2); // york
assert_eq!(mapping[&5], 2..3); // city
}
#[test]
fn same_place_growings() {
let query = ["NY", "subway"];
// 0 1
let mut builder = QueryWordsMapper::new(&query);
// NY = new york
builder.declare(0..1, 2, &["new", "york"]);
// ^ 2 3
// NY = new york city
builder.declare(0..1, 4, &["new", "york", "city"]);
// ^ 4 5 6
// NY = NYC
builder.declare(0..1, 7, &["NYC"]);
// ^ 7
// NY = new york city
builder.declare(0..1, 8, &["new", "york", "city"]);
// ^ 8 9 10
// subway = underground train
builder.declare(1..2, 11, &["underground", "train"]);
// ^ 11 12
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..3); // NY
assert_eq!(mapping[&1], 3..5); // subway
assert_eq!(mapping[&2], 0..1); // new
assert_eq!(mapping[&3], 1..3); // york
assert_eq!(mapping[&4], 0..1); // new
assert_eq!(mapping[&5], 1..2); // york
assert_eq!(mapping[&6], 2..3); // city
assert_eq!(mapping[&7], 0..3); // NYC
assert_eq!(mapping[&8], 0..1); // new
assert_eq!(mapping[&9], 1..2); // york
assert_eq!(mapping[&10], 2..3); // city
assert_eq!(mapping[&11], 3..4); // underground
assert_eq!(mapping[&12], 4..5); // train
}
#[test]
fn bigger_growing() {
let query = ["NYC", "subway"];
// 0 1
let mut builder = QueryWordsMapper::new(&query);
// NYC = new york city
builder.declare(0..1, 2, &["new", "york", "city"]);
// ^ 2 3 4
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..3); // NYC
assert_eq!(mapping[&1], 3..4); // subway
assert_eq!(mapping[&2], 0..1); // new
assert_eq!(mapping[&3], 1..2); // york
assert_eq!(mapping[&4], 2..3); // city
}
#[test]
fn middle_query_growing() {
let query = ["great", "awesome", "NYC", "subway"];
// 0 1 2 3
let mut builder = QueryWordsMapper::new(&query);
// NYC = new york city
builder.declare(2..3, 4, &["new", "york", "city"]);
// ^ 4 5 6
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..1); // great
assert_eq!(mapping[&1], 1..2); // awesome
assert_eq!(mapping[&2], 2..5); // NYC
assert_eq!(mapping[&3], 5..6); // subway
assert_eq!(mapping[&4], 2..3); // new
assert_eq!(mapping[&5], 3..4); // york
assert_eq!(mapping[&6], 4..5); // city
}
#[test]
fn end_query_growing() {
let query = ["NYC", "subway"];
// 0 1
let mut builder = QueryWordsMapper::new(&query);
// NYC = new york city
builder.declare(1..2, 2, &["underground", "train"]);
// ^ 2 3
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..1); // NYC
assert_eq!(mapping[&1], 1..3); // subway
assert_eq!(mapping[&2], 1..2); // underground
assert_eq!(mapping[&3], 2..3); // train
}
#[test]
fn multiple_growings() {
let query = ["great", "awesome", "NYC", "subway"];
// 0 1 2 3
let mut builder = QueryWordsMapper::new(&query);
// NYC = new york city
builder.declare(2..3, 4, &["new", "york", "city"]);
// ^ 4 5 6
// subway = underground train
builder.declare(3..4, 7, &["underground", "train"]);
// ^ 7 8
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..1); // great
assert_eq!(mapping[&1], 1..2); // awesome
assert_eq!(mapping[&2], 2..5); // NYC
assert_eq!(mapping[&3], 5..7); // subway
assert_eq!(mapping[&4], 2..3); // new
assert_eq!(mapping[&5], 3..4); // york
assert_eq!(mapping[&6], 4..5); // city
assert_eq!(mapping[&7], 5..6); // underground
assert_eq!(mapping[&8], 6..7); // train
}
#[test]
fn multiple_probable_growings() {
let query = ["great", "awesome", "NYC", "subway"];
// 0 1 2 3
let mut builder = QueryWordsMapper::new(&query);
// NYC = new york city
builder.declare(2..3, 4, &["new", "york", "city"]);
// ^ 4 5 6
// subway = underground train
builder.declare(3..4, 7, &["underground", "train"]);
// ^ 7 8
// great awesome = good
builder.declare(0..2, 9, &["good"]);
// ^ 9
// awesome NYC = NY
builder.declare(1..3, 10, &["NY"]);
// ^^ 10
// NYC subway = metro
builder.declare(2..4, 11, &["metro"]);
// ^^ 11
let mapping = builder.mapping();
assert_eq!(mapping[&0], 0..1); // great
assert_eq!(mapping[&1], 1..2); // awesome
assert_eq!(mapping[&2], 2..5); // NYC
assert_eq!(mapping[&3], 5..7); // subway
assert_eq!(mapping[&4], 2..3); // new
assert_eq!(mapping[&5], 3..4); // york
assert_eq!(mapping[&6], 4..5); // city
assert_eq!(mapping[&7], 5..6); // underground
assert_eq!(mapping[&8], 6..7); // train
assert_eq!(mapping[&9], 0..2); // good
assert_eq!(mapping[&10], 1..5); // NY
assert_eq!(mapping[&11], 2..7); // metro
}
}


@@ -0,0 +1,41 @@
use std::io::{Read, Write};
use hashbrown::HashMap;
use meilisearch_schema::FieldId;
use serde::{Deserialize, Serialize};
use crate::{DocumentId, Number};
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct RankedMap(HashMap<(DocumentId, FieldId), Number>);
impl RankedMap {
pub fn len(&self) -> usize {
self.0.len()
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn insert(&mut self, document: DocumentId, field: FieldId, number: Number) {
self.0.insert((document, field), number);
}
pub fn remove(&mut self, document: DocumentId, field: FieldId) {
self.0.remove(&(document, field));
}
pub fn get(&self, document: DocumentId, field: FieldId) -> Option<Number> {
self.0.get(&(document, field)).cloned()
}
pub fn read_from_bin<R: Read>(reader: R) -> bincode::Result<RankedMap> {
bincode::deserialize_from(reader).map(RankedMap)
}
pub fn write_to_bin<W: Write>(&self, writer: W) -> bincode::Result<()> {
bincode::serialize_into(writer, &self.0)
}
}
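A minimal usage sketch; DocumentId(0) is constructed the same way as in the indexer tests below, while the FieldId(1) tuple constructor is an assumption here:
#[test]
fn ranked_map_sketch() {
    let mut map = RankedMap::default();
    map.insert(DocumentId(0), FieldId(1), Number::Unsigned(42));
    assert_eq!(map.get(DocumentId(0), FieldId(1)), Some(Number::Unsigned(42)));
    map.remove(DocumentId(0), FieldId(1));
    assert!(map.is_empty());
}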


@@ -0,0 +1,51 @@
use compact_arena::SmallArena;
use sdset::SetBuf;
use crate::DocIndex;
use crate::bucket_sort::{SimpleMatch, BareMatch, PostingsListView};
use crate::reordered_attrs::ReorderedAttrs;
pub struct RawDocument<'a, 'tag> {
pub id: crate::DocumentId,
pub bare_matches: &'a mut [BareMatch<'tag>],
pub processed_matches: Vec<SimpleMatch>,
/// The minimum `distance` found for each query word
pub processed_distances: Vec<Option<u8>>,
/// Does this document contain a field
/// with one word that matches exactly
pub contains_one_word_field: bool,
}
impl<'a, 'tag> RawDocument<'a, 'tag> {
pub fn new<'txn>(
bare_matches: &'a mut [BareMatch<'tag>],
postings_lists: &mut SmallArena<'tag, PostingsListView<'txn>>,
searchable_attrs: Option<&ReorderedAttrs>,
) -> RawDocument<'a, 'tag>
{
if let Some(reordered_attrs) = searchable_attrs {
for bm in bare_matches.iter() {
let postings_list = &postings_lists[bm.postings_list];
let mut rewritten = Vec::new();
for di in postings_list.iter() {
if let Some(attribute) = reordered_attrs.get(di.attribute) {
rewritten.push(DocIndex { attribute, ..*di });
}
}
let new_postings = SetBuf::from_dirty(rewritten);
postings_lists[bm.postings_list].rewrite_with(new_postings);
}
}
bare_matches.sort_unstable_by_key(|m| m.query_index);
RawDocument {
id: bare_matches[0].document_id,
bare_matches,
processed_matches: Vec::new(),
processed_distances: Vec::new(),
contains_one_word_field: false,
}
}
}


@@ -0,0 +1,272 @@
use std::collections::{BTreeMap, HashMap};
use std::convert::TryFrom;
use crate::{DocIndex, DocumentId};
use deunicode::deunicode_with_tofu;
use meilisearch_schema::IndexedPos;
use meilisearch_tokenizer::{is_cjk, SeqTokenizer, Token, Tokenizer};
use sdset::SetBuf;
const WORD_LENGTH_LIMIT: usize = 80;
type Word = Vec<u8>; // TODO: make it a SmallVec
pub struct RawIndexer {
word_limit: usize, // the maximum number of indexed words
stop_words: fst::Set,
words_doc_indexes: BTreeMap<Word, Vec<DocIndex>>,
docs_words: HashMap<DocumentId, Vec<Word>>,
}
pub struct Indexed {
pub words_doc_indexes: BTreeMap<Word, SetBuf<DocIndex>>,
pub docs_words: HashMap<DocumentId, fst::Set>,
}
impl RawIndexer {
pub fn new(stop_words: fst::Set) -> RawIndexer {
RawIndexer::with_word_limit(stop_words, 1000)
}
pub fn with_word_limit(stop_words: fst::Set, limit: usize) -> RawIndexer {
RawIndexer {
word_limit: limit,
stop_words,
words_doc_indexes: BTreeMap::new(),
docs_words: HashMap::new(),
}
}
pub fn index_text(&mut self, id: DocumentId, indexed_pos: IndexedPos, text: &str) -> usize {
let mut number_of_words = 0;
for token in Tokenizer::new(text) {
let must_continue = index_token(
token,
id,
indexed_pos,
self.word_limit,
&self.stop_words,
&mut self.words_doc_indexes,
&mut self.docs_words,
);
number_of_words += 1;
if !must_continue {
break;
}
}
number_of_words
}
pub fn index_text_seq<'a, I>(&mut self, id: DocumentId, indexed_pos: IndexedPos, iter: I)
where
I: IntoIterator<Item = &'a str>,
{
let iter = iter.into_iter();
for token in SeqTokenizer::new(iter) {
let must_continue = index_token(
token,
id,
indexed_pos,
self.word_limit,
&self.stop_words,
&mut self.words_doc_indexes,
&mut self.docs_words,
);
if !must_continue {
break;
}
}
}
pub fn build(self) -> Indexed {
let words_doc_indexes = self
.words_doc_indexes
.into_iter()
.map(|(word, indexes)| (word, SetBuf::from_dirty(indexes)))
.collect();
let docs_words = self
.docs_words
.into_iter()
.map(|(id, mut words)| {
words.sort_unstable();
words.dedup();
(id, fst::Set::from_iter(words).unwrap())
})
.collect();
Indexed {
words_doc_indexes,
docs_words,
}
}
}
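// Indexes a single token: the word is lowercased, checked against the stop
// words, and stored both as-is and in a deunicoded form so that "éteindre"
// can also be found as "eteindre". Returns false once the word limit is
// reached or a position no longer fits in a u16, which stops the caller's loop.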
fn index_token(
token: Token,
id: DocumentId,
indexed_pos: IndexedPos,
word_limit: usize,
stop_words: &fst::Set,
words_doc_indexes: &mut BTreeMap<Word, Vec<DocIndex>>,
docs_words: &mut HashMap<DocumentId, Vec<Word>>,
) -> bool {
if token.word_index >= word_limit {
return false;
}
let lower = token.word.to_lowercase();
let token = Token {
word: &lower,
..token
};
if !stop_words.contains(&token.word) {
match token_to_docindex(id, indexed_pos, token) {
Some(docindex) => {
let word = Vec::from(token.word);
if word.len() <= WORD_LENGTH_LIMIT {
words_doc_indexes
.entry(word.clone())
.or_insert_with(Vec::new)
.push(docindex);
docs_words.entry(id).or_insert_with(Vec::new).push(word);
if !lower.contains(is_cjk) {
let unidecoded = deunicode_with_tofu(&lower, "");
if unidecoded != lower && !unidecoded.is_empty() {
let word = Vec::from(unidecoded);
if word.len() <= WORD_LENGTH_LIMIT {
words_doc_indexes
.entry(word.clone())
.or_insert_with(Vec::new)
.push(docindex);
docs_words.entry(id).or_insert_with(Vec::new).push(word);
}
}
}
}
}
None => return false,
}
}
true
}
fn token_to_docindex(id: DocumentId, indexed_pos: IndexedPos, token: Token) -> Option<DocIndex> {
let word_index = u16::try_from(token.word_index).ok()?;
let char_index = u16::try_from(token.char_index).ok()?;
let char_length = u16::try_from(token.word.chars().count()).ok()?;
let docindex = DocIndex {
document_id: id,
attribute: indexed_pos.0,
word_index,
char_index,
char_length,
};
Some(docindex)
}
#[cfg(test)]
mod tests {
use super::*;
use meilisearch_schema::IndexedPos;
#[test]
fn strange_apostrophe() {
let mut indexer = RawIndexer::new(fst::Set::default());
let docid = DocumentId(0);
let indexed_pos = IndexedPos(0);
let text = "Zut, laspirateur, jai oublié de léteindre !";
indexer.index_text(docid, indexed_pos, text);
let Indexed {
words_doc_indexes, ..
} = indexer.build();
assert!(words_doc_indexes.get(&b"l"[..]).is_some());
assert!(words_doc_indexes.get(&b"aspirateur"[..]).is_some());
assert!(words_doc_indexes.get(&b"ai"[..]).is_some());
assert!(words_doc_indexes.get(&b"eteindre"[..]).is_some());
assert!(words_doc_indexes
.get(&"éteindre".to_owned().into_bytes())
.is_some());
}
#[test]
fn strange_apostrophe_in_sequence() {
let mut indexer = RawIndexer::new(fst::Set::default());
let docid = DocumentId(0);
let indexed_pos = IndexedPos(0);
let text = vec!["Zut, laspirateur, jai oublié de léteindre !"];
indexer.index_text_seq(docid, indexed_pos, text);
let Indexed {
words_doc_indexes, ..
} = indexer.build();
assert!(words_doc_indexes.get(&b"l"[..]).is_some());
assert!(words_doc_indexes.get(&b"aspirateur"[..]).is_some());
assert!(words_doc_indexes.get(&b"ai"[..]).is_some());
assert!(words_doc_indexes.get(&b"eteindre"[..]).is_some());
assert!(words_doc_indexes
.get(&"éteindre".to_owned().into_bytes())
.is_some());
}
#[test]
fn basic_stop_words() {
let stop_words = sdset::SetBuf::from_dirty(vec!["l", "j", "ai", "de"]);
let stop_words = fst::Set::from_iter(stop_words).unwrap();
let mut indexer = RawIndexer::new(stop_words);
let docid = DocumentId(0);
let indexed_pos = IndexedPos(0);
let text = "Zut, laspirateur, jai oublié de léteindre !";
indexer.index_text(docid, indexed_pos, text);
let Indexed {
words_doc_indexes, ..
} = indexer.build();
assert!(words_doc_indexes.get(&b"l"[..]).is_none());
assert!(words_doc_indexes.get(&b"aspirateur"[..]).is_some());
assert!(words_doc_indexes.get(&b"j"[..]).is_none());
assert!(words_doc_indexes.get(&b"ai"[..]).is_none());
assert!(words_doc_indexes.get(&b"de"[..]).is_none());
assert!(words_doc_indexes.get(&b"eteindre"[..]).is_some());
assert!(words_doc_indexes
.get(&"éteindre".to_owned().into_bytes())
.is_some());
}
#[test]
fn no_empty_unidecode() {
let mut indexer = RawIndexer::new(fst::Set::default());
let docid = DocumentId(0);
let indexed_pos = IndexedPos(0);
let text = "🇯🇵";
indexer.index_text(docid, indexed_pos, text);
let Indexed {
words_doc_indexes, ..
} = indexer.build();
assert!(words_doc_indexes
.get(&"🇯🇵".to_owned().into_bytes())
.is_some());
}
}


@@ -0,0 +1,31 @@
use std::cmp;
#[derive(Default, Clone)]
pub struct ReorderedAttrs {
reorders: Vec<Option<u16>>,
reverse: Vec<u16>,
}
impl ReorderedAttrs {
pub fn new() -> ReorderedAttrs {
ReorderedAttrs { reorders: Vec::new(), reverse: Vec::new() }
}
pub fn insert_attribute(&mut self, attribute: u16) {
let new_len = cmp::max(attribute as usize + 1, self.reorders.len());
self.reorders.resize(new_len, None);
self.reorders[attribute as usize] = Some(self.reverse.len() as u16);
self.reverse.push(attribute);
}
pub fn get(&self, attribute: u16) -> Option<u16> {
match self.reorders.get(attribute as usize)? {
Some(attribute) => Some(*attribute),
None => None,
}
}
pub fn reverse(&self, attribute: u16) -> Option<u16> {
self.reverse.get(attribute as usize).copied()
}
}
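A quick sketch of the remapping, assuming only the type above: attributes receive positions in insertion order, and reverse maps a position back to the original attribute.
#[test]
fn reordered_attrs_sketch() {
    let mut attrs = ReorderedAttrs::new();
    attrs.insert_attribute(5); // schema attribute 5 -> position 0
    attrs.insert_attribute(2); // schema attribute 2 -> position 1
    assert_eq!(attrs.get(5), Some(0));
    assert_eq!(attrs.get(2), Some(1));
    assert_eq!(attrs.get(3), None); // never inserted
    assert_eq!(attrs.reverse(0), Some(5));
}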


@@ -0,0 +1,198 @@
use std::str::FromStr;
use ordered_float::OrderedFloat;
use serde::ser;
use serde::Serialize;
use super::SerializerError;
use crate::Number;
pub struct ConvertToNumber;
impl ser::Serializer for ConvertToNumber {
type Ok = Number;
type Error = SerializerError;
type SerializeSeq = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTuple = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTupleStruct = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTupleVariant = ser::Impossible<Self::Ok, Self::Error>;
type SerializeMap = ser::Impossible<Self::Ok, Self::Error>;
type SerializeStruct = ser::Impossible<Self::Ok, Self::Error>;
type SerializeStructVariant = ser::Impossible<Self::Ok, Self::Error>;
fn serialize_bool(self, value: bool) -> Result<Self::Ok, Self::Error> {
Ok(Number::Unsigned(u64::from(value)))
}
fn serialize_char(self, _value: char) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnrankableType { type_name: "char" })
}
fn serialize_i8(self, value: i8) -> Result<Self::Ok, Self::Error> {
Ok(Number::Signed(i64::from(value)))
}
fn serialize_i16(self, value: i16) -> Result<Self::Ok, Self::Error> {
Ok(Number::Signed(i64::from(value)))
}
fn serialize_i32(self, value: i32) -> Result<Self::Ok, Self::Error> {
Ok(Number::Signed(i64::from(value)))
}
fn serialize_i64(self, value: i64) -> Result<Self::Ok, Self::Error> {
Ok(Number::Signed(value))
}
fn serialize_u8(self, value: u8) -> Result<Self::Ok, Self::Error> {
Ok(Number::Unsigned(u64::from(value)))
}
fn serialize_u16(self, value: u16) -> Result<Self::Ok, Self::Error> {
Ok(Number::Unsigned(u64::from(value)))
}
fn serialize_u32(self, value: u32) -> Result<Self::Ok, Self::Error> {
Ok(Number::Unsigned(u64::from(value)))
}
fn serialize_u64(self, value: u64) -> Result<Self::Ok, Self::Error> {
Ok(Number::Unsigned(value))
}
fn serialize_f32(self, value: f32) -> Result<Self::Ok, Self::Error> {
Ok(Number::Float(OrderedFloat(f64::from(value))))
}
fn serialize_f64(self, value: f64) -> Result<Self::Ok, Self::Error> {
Ok(Number::Float(OrderedFloat(value)))
}
fn serialize_str(self, value: &str) -> Result<Self::Ok, Self::Error> {
Ok(Number::from_str(value)?)
}
fn serialize_bytes(self, _v: &[u8]) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnrankableType { type_name: "&[u8]" })
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnrankableType {
type_name: "Option",
})
}
fn serialize_some<T: ?Sized>(self, _value: &T) -> Result<Self::Ok, Self::Error>
where
T: Serialize,
{
Err(SerializerError::UnrankableType {
type_name: "Option",
})
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnrankableType { type_name: "()" })
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnrankableType {
type_name: "unit struct",
})
}
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnrankableType {
type_name: "unit variant",
})
}
fn serialize_newtype_struct<T: ?Sized>(
self,
_name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: Serialize,
{
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized>(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: Serialize,
{
Err(SerializerError::UnrankableType {
type_name: "newtype variant",
})
}
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
Err(SerializerError::UnrankableType {
type_name: "sequence",
})
}
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
Err(SerializerError::UnrankableType { type_name: "tuple" })
}
fn serialize_tuple_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
Err(SerializerError::UnrankableType {
type_name: "tuple struct",
})
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Err(SerializerError::UnrankableType {
type_name: "tuple variant",
})
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
Err(SerializerError::UnrankableType { type_name: "map" })
}
fn serialize_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
Err(SerializerError::UnrankableType {
type_name: "struct",
})
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Err(SerializerError::UnrankableType {
type_name: "struct variant",
})
}
}
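A short sketch of how field values become ranked-map numbers, assuming only the serializer above; strings go through Number::from_str, so non-numeric text is rejected:
#[test]
fn convert_to_number_sketch() {
    assert_eq!(42u8.serialize(ConvertToNumber).unwrap(), Number::Unsigned(42));
    assert_eq!(true.serialize(ConvertToNumber).unwrap(), Number::Unsigned(1));
    assert_eq!("12.5".serialize(ConvertToNumber).unwrap(), Number::Float(OrderedFloat(12.5)));
    assert!("hello".serialize(ConvertToNumber).is_err()); // not a number
}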


@@ -0,0 +1,279 @@
use serde::ser;
use serde::Serialize;
use super::SerializerError;
pub struct ConvertToString;
impl ser::Serializer for ConvertToString {
type Ok = String;
type Error = SerializerError;
type SerializeSeq = SeqConvertToString;
type SerializeTuple = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTupleStruct = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTupleVariant = ser::Impossible<Self::Ok, Self::Error>;
type SerializeMap = MapConvertToString;
type SerializeStruct = StructConvertToString;
type SerializeStructVariant = ser::Impossible<Self::Ok, Self::Error>;
fn serialize_bool(self, value: bool) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_char(self, value: char) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_i8(self, value: i8) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_i16(self, value: i16) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_i32(self, value: i32) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_i64(self, value: i64) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_u8(self, value: u8) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_u16(self, value: u16) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_u32(self, value: u32) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_u64(self, value: u64) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_f32(self, value: f32) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_f64(self, value: f64) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_str(self, value: &str) -> Result<Self::Ok, Self::Error> {
Ok(value.to_string())
}
fn serialize_bytes(self, _v: &[u8]) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "&[u8]" })
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "Option",
})
}
fn serialize_some<T: ?Sized>(self, _value: &T) -> Result<Self::Ok, Self::Error>
where
T: Serialize,
{
Err(SerializerError::UnserializableType {
type_name: "Option",
})
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
Ok(String::new())
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "unit struct",
})
}
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "unit variant",
})
}
fn serialize_newtype_struct<T: ?Sized>(
self,
_name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: Serialize,
{
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized>(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: Serialize,
{
Err(SerializerError::UnserializableType {
type_name: "newtype variant",
})
}
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
Ok(SeqConvertToString {
text: String::new(),
})
}
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "tuple" })
}
fn serialize_tuple_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "tuple struct",
})
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "tuple variant",
})
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
Ok(MapConvertToString {
text: String::new(),
})
}
fn serialize_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
Ok(StructConvertToString {
text: String::new(),
})
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "struct variant",
})
}
}
pub struct MapConvertToString {
text: String,
}
impl ser::SerializeMap for MapConvertToString {
type Ok = String;
type Error = SerializerError;
fn serialize_key<T: ?Sized>(&mut self, key: &T) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let text = key.serialize(ConvertToString)?;
self.text.push_str(&text);
self.text.push_str(" ");
Ok(())
}
fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let text = value.serialize(ConvertToString)?;
self.text.push_str(&text);
// add a separator so this value does not fuse with the next serialized key
self.text.push(' ');
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(self.text)
}
}
pub struct StructConvertToString {
text: String,
}
impl ser::SerializeStruct for StructConvertToString {
type Ok = String;
type Error = SerializerError;
fn serialize_field<T: ?Sized>(
&mut self,
key: &'static str,
value: &T,
) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let value = value.serialize(ConvertToString)?;
self.text.push_str(key);
self.text.push(' ');
self.text.push_str(&value);
// trailing separator keeps consecutive fields apart in the flattened text
self.text.push(' ');
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(self.text)
}
}
pub struct SeqConvertToString {
text: String,
}
impl ser::SerializeSeq for SeqConvertToString {
type Ok = String;
type Error = SerializerError;
fn serialize_element<T: ?Sized>(&mut self, key: &T) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let text = key.serialize(ConvertToString)?;
self.text.push_str(&text);
self.text.push_str(" ");
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(self.text)
}
}
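// A hedged sketch (not part of this diff) of what ConvertToString produces:
// scalars become their display form, while maps, structs and sequences are
// flattened into space-separated words ready for indexing.
#[cfg(test)]
mod convert_to_string_sketch {
use super::ConvertToString;
use serde::Serialize;
#[derive(Serialize)]
struct Movie {
title: &'static str,
year: u32,
}
#[test]
fn flattens_a_struct_into_text() {
let movie = Movie { title: "Carol", year: 2015 };
let text = movie.serialize(ConvertToString).unwrap();
// both the field names and the field values end up in the indexable text
assert!(text.contains("title") && text.contains("Carol") && text.contains("2015"));
}
}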

View File

@ -0,0 +1,161 @@
use std::collections::HashSet;
use std::io::Cursor;
use std::{error::Error, fmt};
use meilisearch_schema::{Schema, FieldId};
use serde::{de, forward_to_deserialize_any};
use serde_json::de::IoRead as SerdeJsonIoRead;
use serde_json::Deserializer as SerdeJsonDeserializer;
use serde_json::Error as SerdeJsonError;
use crate::database::MainT;
use crate::store::DocumentsFields;
use crate::DocumentId;
#[derive(Debug)]
pub enum DeserializerError {
SerdeJson(SerdeJsonError),
Zlmdb(heed::Error),
Custom(String),
}
impl de::Error for DeserializerError {
fn custom<T: fmt::Display>(msg: T) -> Self {
DeserializerError::Custom(msg.to_string())
}
}
impl fmt::Display for DeserializerError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DeserializerError::SerdeJson(e) => write!(f, "serde json related error: {}", e),
DeserializerError::Zlmdb(e) => write!(f, "heed related error: {}", e),
DeserializerError::Custom(s) => f.write_str(s),
}
}
}
impl Error for DeserializerError {}
impl From<SerdeJsonError> for DeserializerError {
fn from(error: SerdeJsonError) -> DeserializerError {
DeserializerError::SerdeJson(error)
}
}
impl From<heed::Error> for DeserializerError {
fn from(error: heed::Error) -> DeserializerError {
DeserializerError::Zlmdb(error)
}
}
pub struct Deserializer<'a> {
pub document_id: DocumentId,
pub reader: &'a heed::RoTxn<MainT>,
pub documents_fields: DocumentsFields,
pub schema: &'a Schema,
pub fields: Option<&'a HashSet<FieldId>>,
}
impl<'de, 'a, 'b> de::Deserializer<'de> for &'b mut Deserializer<'a> {
type Error = DeserializerError;
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
self.deserialize_option(visitor)
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
let mut error = None;
let iter = self
.documents_fields
.document_fields(self.reader, self.document_id)?
.filter_map(|result| {
let (attr, value) = match result {
Ok(value) => value,
Err(e) => {
error = Some(e);
return None;
}
};
let is_displayed = self.schema.is_displayed(attr);
if is_displayed && self.fields.map_or(true, |f| f.contains(&attr)) {
if let Some(attribute_name) = self.schema.name(attr) {
let cursor = Cursor::new(value.to_owned());
let ioread = SerdeJsonIoRead::new(cursor);
let value = Value(SerdeJsonDeserializer::new(ioread));
Some((attribute_name, value))
} else {
None
}
} else {
None
}
});
let mut iter = iter.peekable();
let result = match iter.peek() {
Some(_) => {
let map_deserializer = de::value::MapDeserializer::new(iter);
visitor
.visit_some(map_deserializer)
.map_err(DeserializerError::from)
}
None => visitor.visit_none(),
};
match error.take() {
Some(error) => Err(error.into()),
None => result,
}
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
bytes byte_buf unit unit_struct newtype_struct seq tuple
tuple_struct struct enum identifier ignored_any
}
}
struct Value(SerdeJsonDeserializer<SerdeJsonIoRead<Cursor<Vec<u8>>>>);
impl<'de> de::IntoDeserializer<'de, SerdeJsonError> for Value {
type Deserializer = Self;
fn into_deserializer(self) -> Self::Deserializer {
self
}
}
impl<'de> de::Deserializer<'de> for Value {
type Error = SerdeJsonError;
fn deserialize_any<V>(mut self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
self.0.deserialize_any(visitor)
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
bytes byte_buf option unit unit_struct newtype_struct seq tuple
tuple_struct map struct enum identifier ignored_any
}
}
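// A standalone sketch (assumed example, not in the diff) of the pattern this
// deserializer builds on: stored fields arrive as independent raw JSON values,
// and serde's MapDeserializer stitches the (name, value) pairs back into one
// typed document, exactly as deserialize_map does above with the Value wrapper.
#[cfg(test)]
mod deserializer_sketch {
use serde::de::value::MapDeserializer;
use serde::Deserialize;
#[derive(Deserialize, Debug, PartialEq)]
struct Doc {
id: u64,
title: String,
}
#[test]
fn rebuilds_a_document_from_loose_fields() {
// each field is an independently stored JSON value, as in documents_fields
let fields = vec![
("id", serde_json::json!(7)),
("title", serde_json::json!("Carol")),
];
let doc = Doc::deserialize(MapDeserializer::new(fields.into_iter())).unwrap();
assert_eq!(doc, Doc { id: 7, title: "Carol".to_string() });
}
}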

View File

@ -0,0 +1,310 @@
use std::hash::{Hash, Hasher};
use crate::DocumentId;
use serde::{ser, Serialize};
use serde_json::{Value, Number};
use siphasher::sip::SipHasher;
use super::{ConvertToString, SerializerError};
pub fn extract_document_id<D>(
primary_key: &str,
document: &D,
) -> Result<Option<DocumentId>, SerializerError>
where
D: serde::Serialize,
{
let serializer = ExtractDocumentId { primary_key };
document.serialize(serializer)
}
fn validate_number(value: &Number) -> Option<String> {
if value.is_f64() {
return None
}
Some(value.to_string())
}
fn validate_string(value: &str) -> Option<String> {
if value.chars().all(|x| x.is_ascii_alphanumeric() || x == '-' || x == '_') {
Some(value.to_string())
} else {
None
}
}
pub fn value_to_string(value: &Value) -> Option<String> {
match value {
Value::Null => None,
Value::Bool(_) => None,
Value::Number(value) => validate_number(value),
Value::String(value) => validate_string(value),
Value::Array(_) => None,
Value::Object(_) => None,
}
}
pub fn compute_document_id<H: Hash>(t: H) -> DocumentId {
let mut s = SipHasher::new();
t.hash(&mut s);
let hash = s.finish();
DocumentId(hash)
}
struct ExtractDocumentId<'a> {
primary_key: &'a str,
}
impl<'a> ser::Serializer for ExtractDocumentId<'a> {
type Ok = Option<DocumentId>;
type Error = SerializerError;
type SerializeSeq = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTuple = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTupleStruct = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTupleVariant = ser::Impossible<Self::Ok, Self::Error>;
type SerializeMap = ExtractDocumentIdMapSerializer<'a>;
type SerializeStruct = ExtractDocumentIdStructSerializer<'a>;
type SerializeStructVariant = ser::Impossible<Self::Ok, Self::Error>;
forward_to_unserializable_type! {
bool => serialize_bool,
char => serialize_char,
i8 => serialize_i8,
i16 => serialize_i16,
i32 => serialize_i32,
i64 => serialize_i64,
u8 => serialize_u8,
u16 => serialize_u16,
u32 => serialize_u32,
u64 => serialize_u64,
f32 => serialize_f32,
f64 => serialize_f64,
}
fn serialize_str(self, _value: &str) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "str" })
}
fn serialize_bytes(self, _value: &[u8]) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "&[u8]" })
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "Option",
})
}
fn serialize_some<T: ?Sized>(self, _value: &T) -> Result<Self::Ok, Self::Error>
where
T: Serialize,
{
Err(SerializerError::UnserializableType {
type_name: "Option",
})
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "()" })
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "unit struct",
})
}
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "unit variant",
})
}
fn serialize_newtype_struct<T: ?Sized>(
self,
_name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: Serialize,
{
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized>(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: Serialize,
{
Err(SerializerError::UnserializableType {
type_name: "newtype variant",
})
}
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "sequence",
})
}
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "tuple" })
}
fn serialize_tuple_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "tuple struct",
})
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "tuple variant",
})
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
let serializer = ExtractDocumentIdMapSerializer {
primary_key: self.primary_key,
document_id: None,
current_key_name: None,
};
Ok(serializer)
}
fn serialize_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
let serializer = ExtractDocumentIdStructSerializer {
primary_key: self.primary_key,
document_id: None,
};
Ok(serializer)
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "struct variant",
})
}
}
pub struct ExtractDocumentIdMapSerializer<'a> {
primary_key: &'a str,
document_id: Option<DocumentId>,
current_key_name: Option<String>,
}
impl<'a> ser::SerializeMap for ExtractDocumentIdMapSerializer<'a> {
type Ok = Option<DocumentId>;
type Error = SerializerError;
fn serialize_key<T: ?Sized>(&mut self, key: &T) -> Result<(), Self::Error>
where
T: Serialize,
{
let key = key.serialize(ConvertToString)?;
self.current_key_name = Some(key);
Ok(())
}
fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: Serialize,
{
let key = self.current_key_name.take().unwrap();
self.serialize_entry(&key, value)
}
fn serialize_entry<K: ?Sized, V: ?Sized>(
&mut self,
key: &K,
value: &V,
) -> Result<(), Self::Error>
where
K: Serialize,
V: Serialize,
{
let key = key.serialize(ConvertToString)?;
if self.primary_key == key {
let value = serde_json::to_string(value).and_then(|s| serde_json::from_str(&s))?;
match value_to_string(&value).map(|s| compute_document_id(&s)) {
Some(document_id) => self.document_id = Some(document_id),
None => return Err(SerializerError::InvalidDocumentIdType),
}
}
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(self.document_id)
}
}
pub struct ExtractDocumentIdStructSerializer<'a> {
primary_key: &'a str,
document_id: Option<DocumentId>,
}
impl<'a> ser::SerializeStruct for ExtractDocumentIdStructSerializer<'a> {
type Ok = Option<DocumentId>;
type Error = SerializerError;
fn serialize_field<T: ?Sized>(
&mut self,
key: &'static str,
value: &T,
) -> Result<(), Self::Error>
where
T: Serialize,
{
if self.primary_key == key {
let value = serde_json::to_string(value).and_then(|s| serde_json::from_str(&s))?;
match value_to_string(&value).map(compute_document_id) {
Some(document_id) => self.document_id = Some(document_id),
None => return Err(SerializerError::InvalidDocumentIdType),
}
}
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(self.document_id)
}
}
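// A hedged usage sketch (not part of the diff): the primary key is located by
// name, validated (integers, or strings of alphanumerics, '-' and '_' only),
// then hashed with SipHash into a DocumentId.
#[cfg(test)]
mod extract_document_id_sketch {
use super::extract_document_id;
use serde::Serialize;
#[derive(Serialize)]
struct Movie {
id: &'static str,
title: &'static str,
}
#[test]
fn finds_and_hashes_the_primary_key() {
let valid = Movie { id: "tt0120338", title: "Titanic" };
assert!(extract_document_id("id", &valid).unwrap().is_some());
// '!' is outside the accepted character set, so this must fail
let invalid = Movie { id: "bad!id", title: "Titanic" };
assert!(extract_document_id("id", &invalid).is_err());
}
}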

View File

@ -0,0 +1,362 @@
use meilisearch_schema::IndexedPos;
use serde::ser;
use serde::Serialize;
use super::{ConvertToString, SerializerError};
use crate::raw_indexer::RawIndexer;
use crate::DocumentId;
pub struct Indexer<'a> {
pub pos: IndexedPos,
pub indexer: &'a mut RawIndexer,
pub document_id: DocumentId,
}
impl<'a> ser::Serializer for Indexer<'a> {
type Ok = Option<usize>;
type Error = SerializerError;
type SerializeSeq = SeqIndexer<'a>;
type SerializeTuple = TupleIndexer<'a>;
type SerializeTupleStruct = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTupleVariant = ser::Impossible<Self::Ok, Self::Error>;
type SerializeMap = MapIndexer<'a>;
type SerializeStruct = StructIndexer<'a>;
type SerializeStructVariant = ser::Impossible<Self::Ok, Self::Error>;
fn serialize_bool(self, _value: bool) -> Result<Self::Ok, Self::Error> {
Ok(None)
}
fn serialize_char(self, value: char) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_i8(self, value: i8) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_i16(self, value: i16) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_i32(self, value: i32) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_i64(self, value: i64) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_u8(self, value: u8) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_u16(self, value: u16) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_u32(self, value: u32) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_u64(self, value: u64) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_f32(self, value: f32) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_f64(self, value: f64) -> Result<Self::Ok, Self::Error> {
let text = value.serialize(ConvertToString)?;
self.serialize_str(&text)
}
fn serialize_str(self, text: &str) -> Result<Self::Ok, Self::Error> {
let number_of_words = self
.indexer
.index_text(self.document_id, self.pos, text);
Ok(Some(number_of_words))
}
fn serialize_bytes(self, _v: &[u8]) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnindexableType { type_name: "&[u8]" })
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
Ok(None)
}
fn serialize_some<T: ?Sized>(self, value: &T) -> Result<Self::Ok, Self::Error>
where
T: ser::Serialize,
{
let text = value.serialize(ConvertToString)?;
let number_of_words = self
.indexer
.index_text(self.document_id, self.pos, &text);
Ok(Some(number_of_words))
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
Ok(None)
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
Ok(None)
}
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
Ok(None)
}
fn serialize_newtype_struct<T: ?Sized>(
self,
_name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: ser::Serialize,
{
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized>(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: ser::Serialize,
{
Err(SerializerError::UnindexableType {
type_name: "newtype variant",
})
}
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
let indexer = SeqIndexer {
pos: self.pos,
document_id: self.document_id,
indexer: self.indexer,
texts: Vec::new(),
};
Ok(indexer)
}
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
let indexer = TupleIndexer {
pos: self.pos,
document_id: self.document_id,
indexer: self.indexer,
texts: Vec::new(),
};
Ok(indexer)
}
fn serialize_tuple_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
Err(SerializerError::UnindexableType {
type_name: "tuple struct",
})
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Err(SerializerError::UnindexableType {
type_name: "tuple variant",
})
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
let indexer = MapIndexer {
pos: self.pos,
document_id: self.document_id,
indexer: self.indexer,
texts: Vec::new(),
};
Ok(indexer)
}
fn serialize_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
let indexer = StructIndexer {
pos: self.pos,
document_id: self.document_id,
indexer: self.indexer,
texts: Vec::new(),
};
Ok(indexer)
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Err(SerializerError::UnindexableType {
type_name: "struct variant",
})
}
}
pub struct SeqIndexer<'a> {
pos: IndexedPos,
document_id: DocumentId,
indexer: &'a mut RawIndexer,
texts: Vec<String>,
}
impl<'a> ser::SerializeSeq for SeqIndexer<'a> {
type Ok = Option<usize>;
type Error = SerializerError;
fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let text = value.serialize(ConvertToString)?;
self.texts.push(text);
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
let texts = self.texts.iter().map(String::as_str);
self.indexer
.index_text_seq(self.document_id, self.pos, texts);
Ok(None)
}
}
pub struct MapIndexer<'a> {
pos: IndexedPos,
document_id: DocumentId,
indexer: &'a mut RawIndexer,
texts: Vec<String>,
}
impl<'a> ser::SerializeMap for MapIndexer<'a> {
type Ok = Option<usize>;
type Error = SerializerError;
fn serialize_key<T: ?Sized>(&mut self, key: &T) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let text = key.serialize(ConvertToString)?;
self.texts.push(text);
Ok(())
}
fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let text = value.serialize(ConvertToString)?;
self.texts.push(text);
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
let texts = self.texts.iter().map(String::as_str);
self.indexer
.index_text_seq(self.document_id, self.pos, texts);
Ok(None)
}
}
pub struct StructIndexer<'a> {
pos: IndexedPos,
document_id: DocumentId,
indexer: &'a mut RawIndexer,
texts: Vec<String>,
}
impl<'a> ser::SerializeStruct for StructIndexer<'a> {
type Ok = Option<usize>;
type Error = SerializerError;
fn serialize_field<T: ?Sized>(
&mut self,
key: &'static str,
value: &T,
) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let key_text = key.to_owned();
let value_text = value.serialize(ConvertToString)?;
self.texts.push(key_text);
self.texts.push(value_text);
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
let texts = self.texts.iter().map(String::as_str);
self.indexer
.index_text_seq(self.document_id, self.pos, texts);
Ok(None)
}
}
pub struct TupleIndexer<'a> {
pos: IndexedPos,
document_id: DocumentId,
indexer: &'a mut RawIndexer,
texts: Vec<String>,
}
impl<'a> ser::SerializeTuple for TupleIndexer<'a> {
type Ok = Option<usize>;
type Error = SerializerError;
fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: Serialize,
{
let text = value.serialize(ConvertToString)?;
self.texts.push(text);
Ok(())
}
fn end(self) -> Result<Self::Ok, Self::Error> {
let texts = self.texts.iter().map(String::as_str);
self.indexer
.index_text_seq(self.document_id, self.pos, texts);
Ok(None)
}
}

View File

@ -0,0 +1,112 @@
macro_rules! forward_to_unserializable_type {
($($ty:ident => $se_method:ident,)*) => {
$(
fn $se_method(self, _v: $ty) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "$ty" })
}
)*
}
}
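// For reference, a hedged sketch of what the macro above expands to for a
// single entry such as `f64 => serialize_f64` (stringify! turns the type
// token into the literal "f64"; a plain "$ty" string would not expand):
//
// fn serialize_f64(self, _v: f64) -> Result<Self::Ok, Self::Error> {
// Err(SerializerError::UnserializableType { type_name: "f64" })
// }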
mod convert_to_number;
mod convert_to_string;
mod deserializer;
mod extract_document_id;
mod indexer;
mod serializer;
pub use self::convert_to_number::ConvertToNumber;
pub use self::convert_to_string::ConvertToString;
pub use self::deserializer::{Deserializer, DeserializerError};
pub use self::extract_document_id::{compute_document_id, extract_document_id, value_to_string};
pub use self::indexer::Indexer;
pub use self::serializer::{serialize_value, serialize_value_with_id, Serializer};
use std::{error::Error, fmt};
use serde::ser;
use serde_json::Error as SerdeJsonError;
use meilisearch_schema::Error as SchemaError;
use crate::ParseNumberError;
#[derive(Debug)]
pub enum SerializerError {
DocumentIdNotFound,
InvalidDocumentIdType,
Zlmdb(heed::Error),
SerdeJson(SerdeJsonError),
ParseNumber(ParseNumberError),
Schema(SchemaError),
UnserializableType { type_name: &'static str },
UnindexableType { type_name: &'static str },
UnrankableType { type_name: &'static str },
Custom(String),
}
impl ser::Error for SerializerError {
fn custom<T: fmt::Display>(msg: T) -> Self {
SerializerError::Custom(msg.to_string())
}
}
impl fmt::Display for SerializerError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
SerializerError::DocumentIdNotFound => {
f.write_str("serialized document does not have an id according to the schema")
}
SerializerError::InvalidDocumentIdType => {
f.write_str("a document primary key can be of type integer or string only composed of alphanumeric characters, hyphens (-) and underscores (_).")
}
SerializerError::Zlmdb(e) => write!(f, "heed related error: {}", e),
SerializerError::SerdeJson(e) => write!(f, "serde json error: {}", e),
SerializerError::ParseNumber(e) => {
write!(f, "error while trying to parse a number: {}", e)
}
SerializerError::Schema(e) => write!(f, "impossible to update schema: {}", e),
SerializerError::UnserializableType { type_name } => {
write!(f, "{} is not a serializable type", type_name)
}
SerializerError::UnindexableType { type_name } => {
write!(f, "{} is not an indexable type", type_name)
}
SerializerError::UnrankableType { type_name } => {
write!(f, "{} types can not be used for ranking", type_name)
}
SerializerError::Custom(s) => f.write_str(s),
}
}
}
impl Error for SerializerError {}
impl From<String> for SerializerError {
fn from(value: String) -> SerializerError {
SerializerError::Custom(value)
}
}
impl From<SerdeJsonError> for SerializerError {
fn from(error: SerdeJsonError) -> SerializerError {
SerializerError::SerdeJson(error)
}
}
impl From<heed::Error> for SerializerError {
fn from(error: heed::Error) -> SerializerError {
SerializerError::Zlmdb(error)
}
}
impl From<ParseNumberError> for SerializerError {
fn from(error: ParseNumberError) -> SerializerError {
SerializerError::ParseNumber(error)
}
}
impl From<SchemaError> for SerializerError {
fn from(error: SchemaError) -> SerializerError {
SerializerError::Schema(error)
}
}
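// A hedged sketch (not part of the diff): the From impls above are what let
// `?` lift serde_json and heed failures into SerializerError automatically.
#[cfg(test)]
mod serializer_error_sketch {
use super::SerializerError;
#[test]
fn serde_json_errors_convert_via_question_mark() {
fn parse(input: &str) -> Result<serde_json::Value, SerializerError> {
// the SerdeJsonError is converted through From<SerdeJsonError>
let value = serde_json::from_str(input)?;
Ok(value)
}
assert!(parse("{ not json").is_err());
}
}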

View File

@ -0,0 +1,361 @@
use meilisearch_schema::{Schema, FieldId};
use serde::ser;
use crate::database::MainT;
use crate::raw_indexer::RawIndexer;
use crate::store::{DocumentsFields, DocumentsFieldsCounts};
use crate::{DocumentId, RankedMap};
use super::{ConvertToNumber, ConvertToString, Indexer, SerializerError};
pub struct Serializer<'a, 'b> {
pub txn: &'a mut heed::RwTxn<'b, MainT>,
pub schema: &'a mut Schema,
pub document_store: DocumentsFields,
pub document_fields_counts: DocumentsFieldsCounts,
pub indexer: &'a mut RawIndexer,
pub ranked_map: &'a mut RankedMap,
pub document_id: DocumentId,
}
impl<'a, 'b> ser::Serializer for Serializer<'a, 'b> {
type Ok = ();
type Error = SerializerError;
type SerializeSeq = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTuple = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTupleStruct = ser::Impossible<Self::Ok, Self::Error>;
type SerializeTupleVariant = ser::Impossible<Self::Ok, Self::Error>;
type SerializeMap = MapSerializer<'a, 'b>;
type SerializeStruct = StructSerializer<'a, 'b>;
type SerializeStructVariant = ser::Impossible<Self::Ok, Self::Error>;
forward_to_unserializable_type! {
bool => serialize_bool,
char => serialize_char,
i8 => serialize_i8,
i16 => serialize_i16,
i32 => serialize_i32,
i64 => serialize_i64,
u8 => serialize_u8,
u16 => serialize_u16,
u32 => serialize_u32,
u64 => serialize_u64,
f32 => serialize_f32,
f64 => serialize_f64,
}
fn serialize_str(self, _v: &str) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "str" })
}
fn serialize_bytes(self, _v: &[u8]) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "&[u8]" })
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "Option",
})
}
fn serialize_some<T: ?Sized>(self, _value: &T) -> Result<Self::Ok, Self::Error>
where
T: ser::Serialize,
{
Err(SerializerError::UnserializableType {
type_name: "Option",
})
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "()" })
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "unit struct",
})
}
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "unit variant",
})
}
fn serialize_newtype_struct<T: ?Sized>(
self,
_name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: ser::Serialize,
{
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized>(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: ser::Serialize,
{
Err(SerializerError::UnserializableType {
type_name: "newtype variant",
})
}
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "sequence",
})
}
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
Err(SerializerError::UnserializableType { type_name: "tuple" })
}
fn serialize_tuple_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "tuple struct",
})
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "tuple variant",
})
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
Ok(MapSerializer {
txn: self.txn,
schema: self.schema,
document_id: self.document_id,
document_store: self.document_store,
document_fields_counts: self.document_fields_counts,
indexer: self.indexer,
ranked_map: self.ranked_map,
current_key_name: None,
})
}
fn serialize_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
Ok(StructSerializer {
txn: self.txn,
schema: self.schema,
document_id: self.document_id,
document_store: self.document_store,
document_fields_counts: self.document_fields_counts,
indexer: self.indexer,
ranked_map: self.ranked_map,
})
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Err(SerializerError::UnserializableType {
type_name: "struct variant",
})
}
}
pub struct MapSerializer<'a, 'b> {
txn: &'a mut heed::RwTxn<'b, MainT>,
schema: &'a mut Schema,
document_id: DocumentId,
document_store: DocumentsFields,
document_fields_counts: DocumentsFieldsCounts,
indexer: &'a mut RawIndexer,
ranked_map: &'a mut RankedMap,
current_key_name: Option<String>,
}
impl<'a, 'b> ser::SerializeMap for MapSerializer<'a, 'b> {
type Ok = ();
type Error = SerializerError;
fn serialize_key<T: ?Sized>(&mut self, key: &T) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let key = key.serialize(ConvertToString)?;
self.current_key_name = Some(key);
Ok(())
}
fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
let key = self.current_key_name.take().unwrap();
self.serialize_entry(&key, value)
}
fn serialize_entry<K: ?Sized, V: ?Sized>(
&mut self,
key: &K,
value: &V,
) -> Result<(), Self::Error>
where
K: ser::Serialize,
V: ser::Serialize,
{
let key = key.serialize(ConvertToString)?;
serialize_value(
self.txn,
key.as_str(),
self.schema,
self.document_id,
self.document_store,
self.document_fields_counts,
self.indexer,
self.ranked_map,
value,
)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
pub struct StructSerializer<'a, 'b> {
txn: &'a mut heed::RwTxn<'b, MainT>,
schema: &'a mut Schema,
document_id: DocumentId,
document_store: DocumentsFields,
document_fields_counts: DocumentsFieldsCounts,
indexer: &'a mut RawIndexer,
ranked_map: &'a mut RankedMap,
}
impl<'a, 'b> ser::SerializeStruct for StructSerializer<'a, 'b> {
type Ok = ();
type Error = SerializerError;
fn serialize_field<T: ?Sized>(
&mut self,
key: &'static str,
value: &T,
) -> Result<(), Self::Error>
where
T: ser::Serialize,
{
serialize_value(
self.txn,
key,
self.schema,
self.document_id,
self.document_store,
self.document_fields_counts,
self.indexer,
self.ranked_map,
value,
)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(())
}
}
pub fn serialize_value<'a, T: ?Sized>(
txn: &mut heed::RwTxn<MainT>,
attribute: &str,
schema: &'a mut Schema,
document_id: DocumentId,
document_store: DocumentsFields,
documents_fields_counts: DocumentsFieldsCounts,
indexer: &mut RawIndexer,
ranked_map: &mut RankedMap,
value: &T,
) -> Result<(), SerializerError>
where
T: ser::Serialize,
{
let field_id = schema.insert_and_index(attribute)?;
serialize_value_with_id(
txn,
field_id,
schema,
document_id,
document_store,
documents_fields_counts,
indexer,
ranked_map,
value,
)
}
pub fn serialize_value_with_id<'a, T: ?Sized>(
txn: &mut heed::RwTxn<MainT>,
field_id: FieldId,
schema: &'a Schema,
document_id: DocumentId,
document_store: DocumentsFields,
documents_fields_counts: DocumentsFieldsCounts,
indexer: &mut RawIndexer,
ranked_map: &mut RankedMap,
value: &T,
) -> Result<(), SerializerError>
where
T: ser::Serialize,
{
let serialized = serde_json::to_vec(value)?;
document_store.put_document_field(txn, document_id, field_id, &serialized)?;
if let Some(indexed_pos) = schema.is_indexed(field_id) {
let indexer = Indexer {
pos: *indexed_pos,
indexer,
document_id,
};
if let Some(number_of_words) = value.serialize(indexer)? {
documents_fields_counts.put_document_field_count(
txn,
document_id,
*indexed_pos,
number_of_words as u16,
)?;
}
}
if schema.is_ranked(field_id) {
let number = value.serialize(ConvertToNumber).unwrap_or_default();
ranked_map.insert(document_id, field_id, number);
}
Ok(())
}

View File

@ -0,0 +1,184 @@
use std::collections::{BTreeMap, BTreeSet, HashSet};
use std::str::FromStr;
use std::iter::IntoIterator;
use serde::{Deserialize, Deserializer, Serialize};
use once_cell::sync::Lazy;
use self::RankingRule::*;
pub const DEFAULT_RANKING_RULES: [RankingRule; 6] = [Typo, Words, Proximity, Attribute, WordsPosition, Exactness];
static RANKING_RULE_REGEX: Lazy<regex::Regex> = Lazy::new(|| {
regex::Regex::new(r"(asc|desc)\(([a-zA-Z0-9-_]*)\)").unwrap()
});
#[derive(Default, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct Settings {
#[serde(default, deserialize_with = "deserialize_some")]
pub ranking_rules: Option<Option<Vec<String>>>,
#[serde(default, deserialize_with = "deserialize_some")]
pub distinct_attribute: Option<Option<String>>,
#[serde(default, deserialize_with = "deserialize_some")]
pub searchable_attributes: Option<Option<Vec<String>>>,
#[serde(default, deserialize_with = "deserialize_some")]
pub displayed_attributes: Option<Option<HashSet<String>>>,
#[serde(default, deserialize_with = "deserialize_some")]
pub stop_words: Option<Option<BTreeSet<String>>>,
#[serde(default, deserialize_with = "deserialize_some")]
pub synonyms: Option<Option<BTreeMap<String, Vec<String>>>>,
#[serde(default, deserialize_with = "deserialize_some")]
pub accept_new_fields: Option<Option<bool>>,
}
// Any value that is present is considered Some value, including null.
fn deserialize_some<'de, T, D>(deserializer: D) -> Result<Option<T>, D::Error>
where T: Deserialize<'de>,
D: Deserializer<'de>
{
Deserialize::deserialize(deserializer).map(Some)
}
impl Settings {
pub fn into_update(&self) -> Result<SettingsUpdate, RankingRuleConversionError> {
let settings = self.clone();
let ranking_rules = match settings.ranking_rules {
Some(Some(rules)) => UpdateState::Update(RankingRule::from_iter(rules.iter())?),
Some(None) => UpdateState::Clear,
None => UpdateState::Nothing,
};
Ok(SettingsUpdate {
ranking_rules,
distinct_attribute: settings.distinct_attribute.into(),
primary_key: UpdateState::Nothing,
searchable_attributes: settings.searchable_attributes.into(),
displayed_attributes: settings.displayed_attributes.into(),
stop_words: settings.stop_words.into(),
synonyms: settings.synonyms.into(),
accept_new_fields: settings.accept_new_fields.into(),
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UpdateState<T> {
Update(T),
Clear,
Nothing,
}
impl<T> From<Option<Option<T>>> for UpdateState<T> {
fn from(opt: Option<Option<T>>) -> UpdateState<T> {
match opt {
Some(Some(t)) => UpdateState::Update(t),
Some(None) => UpdateState::Clear,
None => UpdateState::Nothing,
}
}
}
#[derive(Debug, Clone)]
pub struct RankingRuleConversionError;
impl std::fmt::Display for RankingRuleConversionError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "impossible to convert into RankingRule")
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RankingRule {
Typo,
Words,
Proximity,
Attribute,
WordsPosition,
Exactness,
Asc(String),
Desc(String),
}
impl std::fmt::Display for RankingRule {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
RankingRule::Typo => f.write_str("typo"),
RankingRule::Words => f.write_str("words"),
RankingRule::Proximity => f.write_str("proximity"),
RankingRule::Attribute => f.write_str("attribute"),
RankingRule::WordsPosition => f.write_str("wordsPosition"),
RankingRule::Exactness => f.write_str("exactness"),
RankingRule::Asc(field) => write!(f, "asc({})", field),
RankingRule::Desc(field) => write!(f, "desc({})", field),
}
}
}
impl FromStr for RankingRule {
type Err = RankingRuleConversionError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let rule = match s {
"typo" => RankingRule::Typo,
"words" => RankingRule::Words,
"proximity" => RankingRule::Proximity,
"attribute" => RankingRule::Attribute,
"wordsPosition" => RankingRule::WordsPosition,
"exactness" => RankingRule::Exactness,
_ => {
let captures = RANKING_RULE_REGEX.captures(s).ok_or(RankingRuleConversionError)?;
match (captures.get(1).map(|m| m.as_str()), captures.get(2)) {
(Some("asc"), Some(field)) => RankingRule::Asc(field.as_str().to_string()),
(Some("desc"), Some(field)) => RankingRule::Desc(field.as_str().to_string()),
_ => return Err(RankingRuleConversionError)
}
}
};
Ok(rule)
}
}
impl RankingRule {
pub fn field(&self) -> Option<&str> {
match self {
RankingRule::Asc(field) | RankingRule::Desc(field) => Some(field),
_ => None,
}
}
pub fn from_iter(rules: impl IntoIterator<Item = impl AsRef<str>>) -> Result<Vec<RankingRule>, RankingRuleConversionError> {
rules.into_iter()
.map(|s| RankingRule::from_str(s.as_ref()))
.collect()
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SettingsUpdate {
pub ranking_rules: UpdateState<Vec<RankingRule>>,
pub distinct_attribute: UpdateState<String>,
pub primary_key: UpdateState<String>,
pub searchable_attributes: UpdateState<Vec<String>>,
pub displayed_attributes: UpdateState<HashSet<String>>,
pub stop_words: UpdateState<BTreeSet<String>>,
pub synonyms: UpdateState<BTreeMap<String, Vec<String>>>,
pub accept_new_fields: UpdateState<bool>,
}
impl Default for SettingsUpdate {
fn default() -> Self {
Self {
ranking_rules: UpdateState::Nothing,
distinct_attribute: UpdateState::Nothing,
primary_key: UpdateState::Nothing,
searchable_attributes: UpdateState::Nothing,
displayed_attributes: UpdateState::Nothing,
stop_words: UpdateState::Nothing,
synonyms: UpdateState::Nothing,
accept_new_fields: UpdateState::Nothing,
}
}
}
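// A hedged sketch (not part of the diff) of the three-state semantics handled
// by deserialize_some and UpdateState: a missing key maps to Nothing, an
// explicit null to Clear, and a value to Update.
#[cfg(test)]
mod settings_sketch {
use super::{Settings, UpdateState};
#[test]
fn missing_null_and_value_are_distinguished() {
let json = r#"{ "rankingRules": ["typo", "asc(release_date)"], "distinctAttribute": null }"#;
let settings: Settings = serde_json::from_str(json).unwrap();
let update = settings.into_update().unwrap();
match update.ranking_rules {
UpdateState::Update(rules) => assert_eq!(rules.len(), 2),
_ => panic!("rankingRules was given, so it must be Update"),
}
match update.distinct_attribute {
UpdateState::Clear => (),
_ => panic!("an explicit null must become Clear"),
}
match update.stop_words {
UpdateState::Nothing => (),
_ => panic!("an absent key must stay Nothing"),
}
}
}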

View File

@ -0,0 +1,50 @@
use super::BEU64;
use crate::database::MainT;
use crate::DocumentId;
use heed::types::{ByteSlice, OwnedType};
use heed::Result as ZResult;
use std::sync::Arc;
#[derive(Copy, Clone)]
pub struct DocsWords {
pub(crate) docs_words: heed::Database<OwnedType<BEU64>, ByteSlice>,
}
impl DocsWords {
pub fn put_doc_words(
self,
writer: &mut heed::RwTxn<MainT>,
document_id: DocumentId,
words: &fst::Set,
) -> ZResult<()> {
let document_id = BEU64::new(document_id.0);
let bytes = words.as_fst().as_bytes();
self.docs_words.put(writer, &document_id, bytes)
}
pub fn del_doc_words(self, writer: &mut heed::RwTxn<MainT>, document_id: DocumentId) -> ZResult<bool> {
let document_id = BEU64::new(document_id.0);
self.docs_words.delete(writer, &document_id)
}
pub fn clear(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.docs_words.clear(writer)
}
pub fn doc_words(
self,
reader: &heed::RoTxn<MainT>,
document_id: DocumentId,
) -> ZResult<Option<fst::Set>> {
let document_id = BEU64::new(document_id.0);
match self.docs_words.get(reader, &document_id)? {
Some(bytes) => {
let len = bytes.len();
let bytes = Arc::new(bytes.to_owned());
let fst = fst::raw::Fst::from_shared_bytes(bytes, 0, len).unwrap();
Ok(Some(fst::Set::from(fst)))
}
None => Ok(None),
}
}
}
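// A hedged sketch (not in the diff) of the fst::Set stored per document: keys
// must be unique and lexicographically sorted, and the set round-trips through
// the same raw bytes that put_doc_words writes. This assumes the fst 0.3-era
// API (from_iter, from_shared_bytes) already used in this file.
#[cfg(test)]
mod docs_words_sketch {
use std::sync::Arc;
#[test]
fn words_roundtrip_through_fst_bytes() {
let words = fst::Set::from_iter(vec!["apple", "banana", "cherry"]).unwrap();
let bytes = words.as_fst().as_bytes().to_vec();
let len = bytes.len();
let fst = fst::raw::Fst::from_shared_bytes(Arc::new(bytes), 0, len).unwrap();
let set = fst::Set::from(fst);
assert!(set.contains("banana"));
}
}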

View File

@ -0,0 +1,79 @@
use heed::types::{ByteSlice, OwnedType};
use crate::database::MainT;
use heed::Result as ZResult;
use meilisearch_schema::FieldId;
use super::DocumentFieldStoredKey;
use crate::DocumentId;
#[derive(Copy, Clone)]
pub struct DocumentsFields {
pub(crate) documents_fields: heed::Database<OwnedType<DocumentFieldStoredKey>, ByteSlice>,
}
impl DocumentsFields {
pub fn put_document_field(
self,
writer: &mut heed::RwTxn<MainT>,
document_id: DocumentId,
field: FieldId,
value: &[u8],
) -> ZResult<()> {
let key = DocumentFieldStoredKey::new(document_id, field);
self.documents_fields.put(writer, &key, value)
}
pub fn del_all_document_fields(
self,
writer: &mut heed::RwTxn<MainT>,
document_id: DocumentId,
) -> ZResult<usize> {
let start = DocumentFieldStoredKey::new(document_id, FieldId::min());
let end = DocumentFieldStoredKey::new(document_id, FieldId::max());
self.documents_fields.delete_range(writer, &(start..=end))
}
pub fn clear(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.documents_fields.clear(writer)
}
pub fn document_attribute<'txn>(
self,
reader: &'txn heed::RoTxn<MainT>,
document_id: DocumentId,
field: FieldId,
) -> ZResult<Option<&'txn [u8]>> {
let key = DocumentFieldStoredKey::new(document_id, field);
self.documents_fields.get(reader, &key)
}
pub fn document_fields<'txn>(
self,
reader: &'txn heed::RoTxn<MainT>,
document_id: DocumentId,
) -> ZResult<DocumentFieldsIter<'txn>> {
let start = DocumentFieldStoredKey::new(document_id, FieldId::min());
let end = DocumentFieldStoredKey::new(document_id, FieldId::max());
let iter = self.documents_fields.range(reader, &(start..=end))?;
Ok(DocumentFieldsIter { iter })
}
}
pub struct DocumentFieldsIter<'txn> {
iter: heed::RoRange<'txn, OwnedType<DocumentFieldStoredKey>, ByteSlice>,
}
impl<'txn> Iterator for DocumentFieldsIter<'txn> {
type Item = ZResult<(FieldId, &'txn [u8])>;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
Some(Ok((key, bytes))) => {
let field_id = FieldId(key.field_id.get());
Some(Ok((field_id, bytes)))
}
Some(Err(e)) => Some(Err(e)),
None => None,
}
}
}
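// A standalone sketch (not in the diff) of why the composite keys use
// big-endian integers: byte-wise LMDB ordering then matches numeric ordering,
// so the (docid, FieldId::min())..=(docid, FieldId::max()) range above covers
// exactly one document's fields.
#[cfg(test)]
mod key_order_sketch {
#[test]
fn big_endian_keys_sort_numerically() {
let key = |docid: u64, field: u16| {
let mut k = docid.to_be_bytes().to_vec();
k.extend_from_slice(&field.to_be_bytes());
k
};
assert!(key(1, 65535) < key(2, 0));
assert!(key(2, 0) < key(2, 1));
}
}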

View File

@ -0,0 +1,142 @@
use super::DocumentFieldIndexedKey;
use crate::database::MainT;
use crate::DocumentId;
use heed::types::OwnedType;
use heed::Result as ZResult;
use meilisearch_schema::IndexedPos;
#[derive(Copy, Clone)]
pub struct DocumentsFieldsCounts {
pub(crate) documents_fields_counts: heed::Database<OwnedType<DocumentFieldIndexedKey>, OwnedType<u16>>,
}
impl DocumentsFieldsCounts {
pub fn put_document_field_count(
self,
writer: &mut heed::RwTxn<MainT>,
document_id: DocumentId,
attribute: IndexedPos,
value: u16,
) -> ZResult<()> {
let key = DocumentFieldIndexedKey::new(document_id, attribute);
self.documents_fields_counts.put(writer, &key, &value)
}
pub fn del_all_document_fields_counts(
self,
writer: &mut heed::RwTxn<MainT>,
document_id: DocumentId,
) -> ZResult<usize> {
let start = DocumentFieldIndexedKey::new(document_id, IndexedPos::min());
let end = DocumentFieldIndexedKey::new(document_id, IndexedPos::max());
self.documents_fields_counts.delete_range(writer, &(start..=end))
}
pub fn clear(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.documents_fields_counts.clear(writer)
}
pub fn document_field_count(
self,
reader: &heed::RoTxn<MainT>,
document_id: DocumentId,
attribute: IndexedPos,
) -> ZResult<Option<u16>> {
let key = DocumentFieldIndexedKey::new(document_id, attribute);
match self.documents_fields_counts.get(reader, &key)? {
Some(count) => Ok(Some(count)),
None => Ok(None),
}
}
pub fn document_fields_counts<'txn>(
self,
reader: &'txn heed::RoTxn<MainT>,
document_id: DocumentId,
) -> ZResult<DocumentFieldsCountsIter<'txn>> {
let start = DocumentFieldIndexedKey::new(document_id, IndexedPos::min());
let end = DocumentFieldIndexedKey::new(document_id, IndexedPos::max());
let iter = self.documents_fields_counts.range(reader, &(start..=end))?;
Ok(DocumentFieldsCountsIter { iter })
}
pub fn documents_ids<'txn>(self, reader: &'txn heed::RoTxn<MainT>) -> ZResult<DocumentsIdsIter<'txn>> {
let iter = self.documents_fields_counts.iter(reader)?;
Ok(DocumentsIdsIter {
last_seen_id: None,
iter,
})
}
pub fn all_documents_fields_counts<'txn>(
self,
reader: &'txn heed::RoTxn<MainT>,
) -> ZResult<AllDocumentsFieldsCountsIter<'txn>> {
let iter = self.documents_fields_counts.iter(reader)?;
Ok(AllDocumentsFieldsCountsIter { iter })
}
}
pub struct DocumentFieldsCountsIter<'txn> {
iter: heed::RoRange<'txn, OwnedType<DocumentFieldIndexedKey>, OwnedType<u16>>,
}
impl Iterator for DocumentFieldsCountsIter<'_> {
type Item = ZResult<(IndexedPos, u16)>;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
Some(Ok((key, count))) => {
let indexed_pos = IndexedPos(key.indexed_pos.get());
Some(Ok((indexed_pos, count)))
}
Some(Err(e)) => Some(Err(e)),
None => None,
}
}
}
pub struct DocumentsIdsIter<'txn> {
last_seen_id: Option<DocumentId>,
iter: heed::RoIter<'txn, OwnedType<DocumentFieldIndexedKey>, OwnedType<u16>>,
}
impl Iterator for DocumentsIdsIter<'_> {
type Item = ZResult<DocumentId>;
fn next(&mut self) -> Option<Self::Item> {
for result in &mut self.iter {
match result {
Ok((key, _)) => {
let document_id = DocumentId(key.docid.get());
if Some(document_id) != self.last_seen_id {
self.last_seen_id = Some(document_id);
return Some(Ok(document_id));
}
}
Err(e) => return Some(Err(e)),
}
}
None
}
}
pub struct AllDocumentsFieldsCountsIter<'txn> {
iter: heed::RoIter<'txn, OwnedType<DocumentFieldIndexedKey>, OwnedType<u16>>,
}
impl Iterator for AllDocumentsFieldsCountsIter<'_> {
type Item = ZResult<(DocumentId, IndexedPos, u16)>;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
Some(Ok((key, count))) => {
let docid = DocumentId(key.docid.get());
let indexed_pos = IndexedPos(key.indexed_pos.get());
Some(Ok((docid, indexed_pos, count)))
}
Some(Err(e)) => Some(Err(e)),
None => None,
}
}
}
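// A standalone sketch (not in the diff) of the deduplication DocumentsIdsIter
// performs: the keys iterate sorted by (docid, indexed_pos), so remembering
// the last seen id is enough to yield every document exactly once.
#[cfg(test)]
mod documents_ids_sketch {
#[test]
fn sorted_keys_dedup_with_last_seen() {
let keys = [(1u64, 0u16), (1, 1), (2, 0), (3, 0), (3, 2)];
let mut last_seen = None;
let mut ids = Vec::new();
for &(docid, _pos) in &keys {
if Some(docid) != last_seen {
last_seen = Some(docid);
ids.push(docid);
}
}
assert_eq!(ids, vec![1, 2, 3]);
}
}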

View File

@ -0,0 +1,226 @@
use std::sync::Arc;
use std::collections::HashMap;
use chrono::{DateTime, Utc};
use heed::types::{ByteSlice, OwnedType, SerdeBincode, Str};
use heed::Result as ZResult;
use meilisearch_schema::Schema;
use crate::database::MainT;
use crate::RankedMap;
use crate::settings::RankingRule;
const CREATED_AT_KEY: &str = "created-at";
const RANKING_RULES_KEY: &str = "ranking-rules";
const DISTINCT_ATTRIBUTE_KEY: &str = "distinct-attribute";
const STOP_WORDS_KEY: &str = "stop-words";
const SYNONYMS_KEY: &str = "synonyms";
const CUSTOMS_KEY: &str = "customs";
const FIELDS_FREQUENCY_KEY: &str = "fields-frequency";
const NAME_KEY: &str = "name";
const NUMBER_OF_DOCUMENTS_KEY: &str = "number-of-documents";
const RANKED_MAP_KEY: &str = "ranked-map";
const SCHEMA_KEY: &str = "schema";
const UPDATED_AT_KEY: &str = "updated-at";
const WORDS_KEY: &str = "words";
pub type FreqsMap = HashMap<String, usize>;
type SerdeFreqsMap = SerdeBincode<FreqsMap>;
type SerdeDatetime = SerdeBincode<DateTime<Utc>>;
#[derive(Copy, Clone)]
pub struct Main {
pub(crate) main: heed::PolyDatabase,
}
impl Main {
pub fn clear(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.main.clear(writer)
}
pub fn put_name(self, writer: &mut heed::RwTxn<MainT>, name: &str) -> ZResult<()> {
self.main.put::<_, Str, Str>(writer, NAME_KEY, name)
}
pub fn name(self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<String>> {
Ok(self
.main
.get::<_, Str, Str>(reader, NAME_KEY)?
.map(|name| name.to_owned()))
}
pub fn put_created_at(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.main
.put::<_, Str, SerdeDatetime>(writer, CREATED_AT_KEY, &Utc::now())
}
pub fn created_at(self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<DateTime<Utc>>> {
self.main.get::<_, Str, SerdeDatetime>(reader, CREATED_AT_KEY)
}
pub fn put_updated_at(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.main
.put::<_, Str, SerdeDatetime>(writer, UPDATED_AT_KEY, &Utc::now())
}
pub fn updated_at(self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<DateTime<Utc>>> {
self.main.get::<_, Str, SerdeDatetime>(reader, UPDATED_AT_KEY)
}
pub fn put_words_fst(self, writer: &mut heed::RwTxn<MainT>, fst: &fst::Set) -> ZResult<()> {
let bytes = fst.as_fst().as_bytes();
self.main.put::<_, Str, ByteSlice>(writer, WORDS_KEY, bytes)
}
pub unsafe fn static_words_fst(self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<fst::Set>> {
match self.main.get::<_, Str, ByteSlice>(reader, WORDS_KEY)? {
Some(bytes) => {
let bytes: &'static [u8] = std::mem::transmute(bytes);
let set = fst::Set::from_static_slice(bytes).unwrap();
Ok(Some(set))
}
None => Ok(None),
}
}
pub fn words_fst(self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<fst::Set>> {
match self.main.get::<_, Str, ByteSlice>(reader, WORDS_KEY)? {
Some(bytes) => {
let len = bytes.len();
let bytes = Arc::new(bytes.to_owned());
let fst = fst::raw::Fst::from_shared_bytes(bytes, 0, len).unwrap();
Ok(Some(fst::Set::from(fst)))
}
None => Ok(None),
}
}
pub fn put_schema(self, writer: &mut heed::RwTxn<MainT>, schema: &Schema) -> ZResult<()> {
self.main.put::<_, Str, SerdeBincode<Schema>>(writer, SCHEMA_KEY, schema)
}
pub fn schema(self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<Schema>> {
self.main.get::<_, Str, SerdeBincode<Schema>>(reader, SCHEMA_KEY)
}
pub fn delete_schema(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<bool> {
self.main.delete::<_, Str>(writer, SCHEMA_KEY)
}
pub fn put_ranked_map(self, writer: &mut heed::RwTxn<MainT>, ranked_map: &RankedMap) -> ZResult<()> {
self.main.put::<_, Str, SerdeBincode<RankedMap>>(writer, RANKED_MAP_KEY, ranked_map)
}
pub fn ranked_map(self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<RankedMap>> {
self.main.get::<_, Str, SerdeBincode<RankedMap>>(reader, RANKED_MAP_KEY)
}
pub fn put_synonyms_fst(self, writer: &mut heed::RwTxn<MainT>, fst: &fst::Set) -> ZResult<()> {
let bytes = fst.as_fst().as_bytes();
self.main.put::<_, Str, ByteSlice>(writer, SYNONYMS_KEY, bytes)
}
pub fn synonyms_fst(self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<fst::Set>> {
match self.main.get::<_, Str, ByteSlice>(reader, SYNONYMS_KEY)? {
Some(bytes) => {
let len = bytes.len();
let bytes = Arc::new(bytes.to_owned());
let fst = fst::raw::Fst::from_shared_bytes(bytes, 0, len).unwrap();
Ok(Some(fst::Set::from(fst)))
}
None => Ok(None),
}
}
pub fn put_stop_words_fst(self, writer: &mut heed::RwTxn<MainT>, fst: &fst::Set) -> ZResult<()> {
let bytes = fst.as_fst().as_bytes();
self.main.put::<_, Str, ByteSlice>(writer, STOP_WORDS_KEY, bytes)
}
pub fn stop_words_fst(self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<fst::Set>> {
match self.main.get::<_, Str, ByteSlice>(reader, STOP_WORDS_KEY)? {
Some(bytes) => {
let len = bytes.len();
let bytes = Arc::new(bytes.to_owned());
let fst = fst::raw::Fst::from_shared_bytes(bytes, 0, len).unwrap();
Ok(Some(fst::Set::from(fst)))
}
None => Ok(None),
}
}
pub fn put_number_of_documents<F>(self, writer: &mut heed::RwTxn<MainT>, f: F) -> ZResult<u64>
where
F: Fn(u64) -> u64,
{
let new = self.number_of_documents(&*writer).map(f)?;
self.main
.put::<_, Str, OwnedType<u64>>(writer, NUMBER_OF_DOCUMENTS_KEY, &new)?;
Ok(new)
}
pub fn number_of_documents(self, reader: &heed::RoTxn<MainT>) -> ZResult<u64> {
match self
.main
.get::<_, Str, OwnedType<u64>>(reader, NUMBER_OF_DOCUMENTS_KEY)?
{
Some(value) => Ok(value),
None => Ok(0),
}
}
pub fn put_fields_frequency(
self,
writer: &mut heed::RwTxn<MainT>,
fields_frequency: &FreqsMap,
) -> ZResult<()> {
self.main
.put::<_, Str, SerdeFreqsMap>(writer, FIELDS_FREQUENCY_KEY, fields_frequency)
}
pub fn fields_frequency(&self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<FreqsMap>> {
match self
.main
.get::<_, Str, SerdeFreqsMap>(reader, FIELDS_FREQUENCY_KEY)?
{
Some(freqs) => Ok(Some(freqs)),
None => Ok(None),
}
}
pub fn ranking_rules(&self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<Vec<RankingRule>>> {
self.main.get::<_, Str, SerdeBincode<Vec<RankingRule>>>(reader, RANKING_RULES_KEY)
}
pub fn put_ranking_rules(self, writer: &mut heed::RwTxn<MainT>, value: &[RankingRule]) -> ZResult<()> {
self.main.put::<_, Str, SerdeBincode<Vec<RankingRule>>>(writer, RANKING_RULES_KEY, &value.to_vec())
}
pub fn delete_ranking_rules(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<bool> {
self.main.delete::<_, Str>(writer, RANKING_RULES_KEY)
}
pub fn distinct_attribute(&self, reader: &heed::RoTxn<MainT>) -> ZResult<Option<String>> {
let value = self.main.get::<_, Str, Str>(reader, DISTINCT_ATTRIBUTE_KEY)?;
Ok(value.map(ToOwned::to_owned))
}
pub fn put_distinct_attribute(self, writer: &mut heed::RwTxn<MainT>, value: &str) -> ZResult<()> {
self.main.put::<_, Str, Str>(writer, DISTINCT_ATTRIBUTE_KEY, value)
}
pub fn delete_distinct_attribute(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<bool> {
self.main.delete::<_, Str>(writer, DISTINCT_ATTRIBUTE_KEY)
}
pub fn put_customs(self, writer: &mut heed::RwTxn<MainT>, customs: &[u8]) -> ZResult<()> {
self.main
.put::<_, Str, ByteSlice>(writer, CUSTOMS_KEY, customs)
}
pub fn customs<'txn>(self, reader: &'txn heed::RoTxn<MainT>) -> ZResult<Option<&'txn [u8]>> {
self.main.get::<_, Str, ByteSlice>(reader, CUSTOMS_KEY)
}
}
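// A standalone sketch (not in the diff) of the closure-based counter used by
// put_number_of_documents: read the current value (0 when absent), apply the
// caller's function, write the result back, and return it. A HashMap stands in
// for the heed database so the sketch needs no open environment.
#[cfg(test)]
mod counter_sketch {
use std::collections::HashMap;
fn put_number_of_documents<F: Fn(u64) -> u64>(store: &mut HashMap<&'static str, u64>, f: F) -> u64 {
let old = store.get("number-of-documents").copied().unwrap_or(0);
let new = f(old);
store.insert("number-of-documents", new);
new
}
#[test]
fn counter_reads_modifies_and_writes() {
let mut store = HashMap::new();
assert_eq!(put_number_of_documents(&mut store, |n| n + 5), 5);
assert_eq!(put_number_of_documents(&mut store, |n| n + 2), 7);
}
}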

View File

@ -0,0 +1,518 @@
mod docs_words;
mod prefix_documents_cache;
mod prefix_postings_lists_cache;
mod documents_fields;
mod documents_fields_counts;
mod main;
mod postings_lists;
mod synonyms;
mod updates;
mod updates_results;
pub use self::docs_words::DocsWords;
pub use self::prefix_documents_cache::PrefixDocumentsCache;
pub use self::prefix_postings_lists_cache::PrefixPostingsListsCache;
pub use self::documents_fields::{DocumentFieldsIter, DocumentsFields};
pub use self::documents_fields_counts::{
DocumentFieldsCountsIter, DocumentsFieldsCounts, DocumentsIdsIter,
};
pub use self::main::Main;
pub use self::postings_lists::PostingsLists;
pub use self::synonyms::Synonyms;
pub use self::updates::Updates;
pub use self::updates_results::UpdatesResults;
use std::borrow::Cow;
use std::collections::HashSet;
use std::convert::TryInto;
use std::{mem, ptr};
use heed::Result as ZResult;
use heed::{BytesEncode, BytesDecode};
use meilisearch_schema::{IndexedPos, FieldId};
use sdset::{Set, SetBuf};
use serde::de::{self, Deserialize};
use zerocopy::{AsBytes, FromBytes};
use crate::criterion::Criteria;
use crate::database::{MainT, UpdateT};
use crate::database::{UpdateEvent, UpdateEventsEmitter};
use crate::serde::Deserializer;
use crate::settings::SettingsUpdate;
use crate::{query_builder::QueryBuilder, update, DocIndex, DocumentId, Error, MResult};
type BEU64 = zerocopy::U64<byteorder::BigEndian>;
type BEU16 = zerocopy::U16<byteorder::BigEndian>;
#[derive(Debug, Copy, Clone, AsBytes, FromBytes)]
#[repr(C)]
pub struct DocumentFieldIndexedKey {
docid: BEU64,
indexed_pos: BEU16,
}
impl DocumentFieldIndexedKey {
fn new(docid: DocumentId, indexed_pos: IndexedPos) -> DocumentFieldIndexedKey {
DocumentFieldIndexedKey {
docid: BEU64::new(docid.0),
indexed_pos: BEU16::new(indexed_pos.0),
}
}
}
#[derive(Debug, Copy, Clone, AsBytes, FromBytes)]
#[repr(C)]
pub struct DocumentFieldStoredKey {
docid: BEU64,
field_id: BEU16,
}
impl DocumentFieldStoredKey {
fn new(docid: DocumentId, field_id: FieldId) -> DocumentFieldStoredKey {
DocumentFieldStoredKey {
docid: BEU64::new(docid.0),
field_id: BEU16::new(field_id.0),
}
}
}
#[derive(Default, Debug)]
pub struct Postings<'a> {
pub docids: Cow<'a, Set<DocumentId>>,
pub matches: Cow<'a, Set<DocIndex>>,
}
pub struct PostingsCodec;
impl<'a> BytesEncode<'a> for PostingsCodec {
type EItem = Postings<'a>;
fn bytes_encode(item: &'a Self::EItem) -> Option<Cow<'a, [u8]>> {
let u64_size = mem::size_of::<u64>();
let docids_size = item.docids.len() * mem::size_of::<DocumentId>();
let matches_size = item.matches.len() * mem::size_of::<DocIndex>();
let mut buffer = Vec::with_capacity(u64_size + docids_size + matches_size);
// the decoder always reads an 8-byte big-endian count, so cast usize to u64
let docids_len = item.docids.len() as u64;
buffer.extend_from_slice(&docids_len.to_be_bytes());
buffer.extend_from_slice(item.docids.as_bytes());
buffer.extend_from_slice(item.matches.as_bytes());
Some(Cow::Owned(buffer))
}
}
fn aligned_to(bytes: &[u8], align: usize) -> bool {
(bytes as *const _ as *const () as usize) % align == 0
}
fn from_bytes_to_set<'a, T: 'a>(bytes: &'a [u8]) -> Option<Cow<'a, Set<T>>>
where T: Clone + FromBytes
{
match zerocopy::LayoutVerified::<_, [T]>::new_slice(bytes) {
Some(layout) => Some(Cow::Borrowed(Set::new_unchecked(layout.into_slice()))),
None => {
let len = bytes.len();
let elem_size = mem::size_of::<T>();
// ensure that it is the alignment that is wrong
// and the length is valid
if len % elem_size == 0 && !aligned_to(bytes, mem::align_of::<T>()) {
let elems = len / elem_size;
let mut vec = Vec::<T>::with_capacity(elems);
unsafe {
let dst = vec.as_mut_ptr() as *mut u8;
ptr::copy_nonoverlapping(bytes.as_ptr(), dst, len);
vec.set_len(elems);
}
return Some(Cow::Owned(SetBuf::new_unchecked(vec)));
}
None
}
}
}
impl<'a> BytesDecode<'a> for PostingsCodec {
type DItem = Postings<'a>;
fn bytes_decode(bytes: &'a [u8]) -> Option<Self::DItem> {
let u64_size = mem::size_of::<u64>();
let docid_size = mem::size_of::<DocumentId>();
let (len_bytes, bytes) = bytes.split_at(u64_size);
let docids_len = len_bytes.try_into().ok().map(u64::from_be_bytes)? as usize;
let docids_size = docids_len * docid_size;
let docids_bytes = &bytes[..docids_size];
let matches_bytes = &bytes[docids_size..];
let docids = from_bytes_to_set(docids_bytes)?;
let matches = from_bytes_to_set(matches_bytes)?;
Some(Postings { docids, matches })
}
}
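// A standalone sketch (not in the diff) of the layout PostingsCodec encodes:
// an 8-byte big-endian docid count, then the raw docids array, then the raw
// matches array; plain u64s stand in for the zerocopy types here.
#[cfg(test)]
mod postings_layout_sketch {
use std::convert::TryInto;
#[test]
fn length_prefix_splits_the_two_arrays() {
let docids: Vec<u64> = vec![1, 2, 3];
let mut buffer = Vec::new();
buffer.extend_from_slice(&(docids.len() as u64).to_be_bytes());
for docid in &docids {
buffer.extend_from_slice(&docid.to_be_bytes());
}
// decoding mirrors bytes_decode: read the count, then slice the arrays
let (len_bytes, rest) = buffer.split_at(std::mem::size_of::<u64>());
let docids_len = u64::from_be_bytes(len_bytes.try_into().unwrap()) as usize;
assert_eq!(docids_len, 3);
assert_eq!(rest.len(), docids_len * std::mem::size_of::<u64>());
}
}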
fn main_name(name: &str) -> String {
format!("store-{}", name)
}
fn postings_lists_name(name: &str) -> String {
format!("store-{}-postings-lists", name)
}
fn documents_fields_name(name: &str) -> String {
format!("store-{}-documents-fields", name)
}
fn documents_fields_counts_name(name: &str) -> String {
format!("store-{}-documents-fields-counts", name)
}
fn synonyms_name(name: &str) -> String {
format!("store-{}-synonyms", name)
}
fn docs_words_name(name: &str) -> String {
format!("store-{}-docs-words", name)
}
fn prefix_documents_cache_name(name: &str) -> String {
format!("store-{}-prefix-documents-cache", name)
}
fn prefix_postings_lists_cache_name(name: &str) -> String {
format!("store-{}-prefix-postings-lists-cache", name)
}
fn updates_name(name: &str) -> String {
format!("store-{}-updates", name)
}
fn updates_results_name(name: &str) -> String {
format!("store-{}-updates-results", name)
}
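Each helper simply namespaces an LMDB database under the index name; for an index named "movies" this yields, for example:
#[test]
fn store_names_are_namespaced() {
assert_eq!(main_name("movies"), "store-movies");
assert_eq!(postings_lists_name("movies"), "store-movies-postings-lists");
assert_eq!(updates_name("movies"), "store-movies-updates");
}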
#[derive(Clone)]
pub struct Index {
pub main: Main,
pub postings_lists: PostingsLists,
pub documents_fields: DocumentsFields,
pub documents_fields_counts: DocumentsFieldsCounts,
pub synonyms: Synonyms,
pub docs_words: DocsWords,
pub prefix_documents_cache: PrefixDocumentsCache,
pub prefix_postings_lists_cache: PrefixPostingsListsCache,
pub updates: Updates,
pub updates_results: UpdatesResults,
pub(crate) updates_notifier: UpdateEventsEmitter,
}
impl Index {
pub fn document<T: de::DeserializeOwned>(
&self,
reader: &heed::RoTxn<MainT>,
attributes: Option<&HashSet<&str>>,
document_id: DocumentId,
) -> MResult<Option<T>> {
let schema = self.main.schema(reader)?;
let schema = schema.ok_or(Error::SchemaMissing)?;
let attributes = match attributes {
Some(attributes) => Some(attributes.iter().filter_map(|name| schema.id(*name)).collect()),
None => None,
};
let mut deserializer = Deserializer {
document_id,
reader,
documents_fields: self.documents_fields,
schema: &schema,
fields: attributes.as_ref(),
};
Ok(Option::<T>::deserialize(&mut deserializer)?)
}
pub fn document_attribute<T: de::DeserializeOwned>(
&self,
reader: &heed::RoTxn<MainT>,
document_id: DocumentId,
attribute: FieldId,
) -> MResult<Option<T>> {
let bytes = self
.documents_fields
.document_attribute(reader, document_id, attribute)?;
match bytes {
Some(bytes) => Ok(Some(serde_json::from_slice(bytes)?)),
None => Ok(None),
}
}
pub fn document_attribute_bytes<'txn>(
&self,
reader: &'txn heed::RoTxn<MainT>,
document_id: DocumentId,
attribute: FieldId,
) -> MResult<Option<&'txn [u8]>> {
let bytes = self
.documents_fields
.document_attribute(reader, document_id, attribute)?;
match bytes {
Some(bytes) => Ok(Some(bytes)),
None => Ok(None),
}
}
pub fn customs_update(&self, writer: &mut heed::RwTxn<UpdateT>, customs: Vec<u8>) -> ZResult<u64> {
let _ = self.updates_notifier.send(UpdateEvent::NewUpdate);
update::push_customs_update(writer, self.updates, self.updates_results, customs)
}
pub fn settings_update(&self, writer: &mut heed::RwTxn<UpdateT>, update: SettingsUpdate) -> ZResult<u64> {
let _ = self.updates_notifier.send(UpdateEvent::NewUpdate);
update::push_settings_update(writer, self.updates, self.updates_results, update)
}
pub fn documents_addition<D>(&self) -> update::DocumentsAddition<D> {
update::DocumentsAddition::new(
self.updates,
self.updates_results,
self.updates_notifier.clone(),
)
}
pub fn documents_partial_addition<D>(&self) -> update::DocumentsAddition<D> {
update::DocumentsAddition::new_partial(
self.updates,
self.updates_results,
self.updates_notifier.clone(),
)
}
pub fn documents_deletion(&self) -> update::DocumentsDeletion {
update::DocumentsDeletion::new(
self.updates,
self.updates_results,
self.updates_notifier.clone(),
)
}
pub fn clear_all(&self, writer: &mut heed::RwTxn<UpdateT>) -> MResult<u64> {
let _ = self.updates_notifier.send(UpdateEvent::NewUpdate);
update::push_clear_all(writer, self.updates, self.updates_results)
}
pub fn current_update_id(&self, reader: &heed::RoTxn<UpdateT>) -> MResult<Option<u64>> {
match self.updates.last_update(reader)? {
Some((id, _)) => Ok(Some(id)),
None => Ok(None),
}
}
pub fn update_status(
&self,
reader: &heed::RoTxn<UpdateT>,
update_id: u64,
) -> MResult<Option<update::UpdateStatus>> {
update::update_status(reader, self.updates, self.updates_results, update_id)
}
pub fn all_updates_status(&self, reader: &heed::RoTxn<UpdateT>) -> MResult<Vec<update::UpdateStatus>> {
let mut updates = Vec::new();
let mut last_update_result_id = 0;
// retrieve all updates results
if let Some((last_id, _)) = self.updates_results.last_update(reader)? {
updates.reserve(last_id as usize);
for id in 0..=last_id {
if let Some(update) = self.update_status(reader, id)? {
updates.push(update);
last_update_result_id = id + 1;
}
}
}
// retrieve all enqueued updates
if let Some((last_id, _)) = self.updates.last_update(reader)? {
for id in last_update_result_id..=last_id {
if let Some(update) = self.update_status(reader, id)? {
updates.push(update);
}
}
}
Ok(updates)
}
pub fn query_builder(&self) -> QueryBuilder {
QueryBuilder::new(
self.main,
self.postings_lists,
self.documents_fields_counts,
self.synonyms,
self.prefix_documents_cache,
self.prefix_postings_lists_cache,
)
}
pub fn query_builder_with_criteria<'c, 'f, 'd>(
&self,
criteria: Criteria<'c>,
) -> QueryBuilder<'c, 'f, 'd> {
QueryBuilder::with_criteria(
self.main,
self.postings_lists,
self.documents_fields_counts,
self.synonyms,
self.prefix_documents_cache,
self.prefix_postings_lists_cache,
criteria,
)
}
}
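A minimal read-path sketch, assuming a `MainT` read transaction is already open; the document id is hypothetical:
fn fetch_whole_document(
index: &Index,
reader: &heed::RoTxn<MainT>,
) -> MResult<Option<serde_json::Value>> {
// None means "all attributes"; a HashSet of field names would restrict the result
index.document(reader, None, DocumentId(7))
}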
pub fn create(
env: &heed::Env,
update_env: &heed::Env,
name: &str,
updates_notifier: UpdateEventsEmitter,
) -> MResult<Index> {
// create all the store names
let main_name = main_name(name);
let postings_lists_name = postings_lists_name(name);
let documents_fields_name = documents_fields_name(name);
let documents_fields_counts_name = documents_fields_counts_name(name);
let synonyms_name = synonyms_name(name);
let docs_words_name = docs_words_name(name);
let prefix_documents_cache_name = prefix_documents_cache_name(name);
let prefix_postings_lists_cache_name = prefix_postings_lists_cache_name(name);
let updates_name = updates_name(name);
let updates_results_name = updates_results_name(name);
// open all the stores
let main = env.create_poly_database(Some(&main_name))?;
let postings_lists = env.create_database(Some(&postings_lists_name))?;
let documents_fields = env.create_database(Some(&documents_fields_name))?;
let documents_fields_counts = env.create_database(Some(&documents_fields_counts_name))?;
let synonyms = env.create_database(Some(&synonyms_name))?;
let docs_words = env.create_database(Some(&docs_words_name))?;
let prefix_documents_cache = env.create_database(Some(&prefix_documents_cache_name))?;
let prefix_postings_lists_cache = env.create_database(Some(&prefix_postings_lists_cache_name))?;
let updates = update_env.create_database(Some(&updates_name))?;
let updates_results = update_env.create_database(Some(&updates_results_name))?;
Ok(Index {
main: Main { main },
postings_lists: PostingsLists { postings_lists },
documents_fields: DocumentsFields { documents_fields },
documents_fields_counts: DocumentsFieldsCounts { documents_fields_counts },
synonyms: Synonyms { synonyms },
docs_words: DocsWords { docs_words },
prefix_postings_lists_cache: PrefixPostingsListsCache { prefix_postings_lists_cache },
prefix_documents_cache: PrefixDocumentsCache { prefix_documents_cache },
updates: Updates { updates },
updates_results: UpdatesResults { updates_results },
updates_notifier,
})
}
pub fn open(
env: &heed::Env,
update_env: &heed::Env,
name: &str,
updates_notifier: UpdateEventsEmitter,
) -> MResult<Option<Index>> {
// create all the store names
let main_name = main_name(name);
let postings_lists_name = postings_lists_name(name);
let documents_fields_name = documents_fields_name(name);
let documents_fields_counts_name = documents_fields_counts_name(name);
let synonyms_name = synonyms_name(name);
let docs_words_name = docs_words_name(name);
let prefix_documents_cache_name = prefix_documents_cache_name(name);
let prefix_postings_lists_cache_name = prefix_postings_lists_cache_name(name);
let updates_name = updates_name(name);
let updates_results_name = updates_results_name(name);
// open all the stores
let main = match env.open_poly_database(Some(&main_name))? {
Some(main) => main,
None => return Ok(None),
};
let postings_lists = match env.open_database(Some(&postings_lists_name))? {
Some(postings_lists) => postings_lists,
None => return Ok(None),
};
let documents_fields = match env.open_database(Some(&documents_fields_name))? {
Some(documents_fields) => documents_fields,
None => return Ok(None),
};
let documents_fields_counts = match env.open_database(Some(&documents_fields_counts_name))? {
Some(documents_fields_counts) => documents_fields_counts,
None => return Ok(None),
};
let synonyms = match env.open_database(Some(&synonyms_name))? {
Some(synonyms) => synonyms,
None => return Ok(None),
};
let docs_words = match env.open_database(Some(&docs_words_name))? {
Some(docs_words) => docs_words,
None => return Ok(None),
};
let prefix_documents_cache = match env.open_database(Some(&prefix_documents_cache_name))? {
Some(prefix_documents_cache) => prefix_documents_cache,
None => return Ok(None),
};
let prefix_postings_lists_cache = match env.open_database(Some(&prefix_postings_lists_cache_name))? {
Some(prefix_postings_lists_cache) => prefix_postings_lists_cache,
None => return Ok(None),
};
let updates = match update_env.open_database(Some(&updates_name))? {
Some(updates) => updates,
None => return Ok(None),
};
let updates_results = match update_env.open_database(Some(&updates_results_name))? {
Some(updates_results) => updates_results,
None => return Ok(None),
};
Ok(Some(Index {
main: Main { main },
postings_lists: PostingsLists { postings_lists },
documents_fields: DocumentsFields { documents_fields },
documents_fields_counts: DocumentsFieldsCounts { documents_fields_counts },
synonyms: Synonyms { synonyms },
docs_words: DocsWords { docs_words },
prefix_documents_cache: PrefixDocumentsCache { prefix_documents_cache },
prefix_postings_lists_cache: PrefixPostingsListsCache { prefix_postings_lists_cache },
updates: Updates { updates },
updates_results: UpdatesResults { updates_results },
updates_notifier,
}))
}
pub fn clear(
writer: &mut heed::RwTxn<MainT>,
update_writer: &mut heed::RwTxn<UpdateT>,
index: &Index,
) -> MResult<()> {
// clear all the stores
index.main.clear(writer)?;
index.postings_lists.clear(writer)?;
index.documents_fields.clear(writer)?;
index.documents_fields_counts.clear(writer)?;
index.synonyms.clear(writer)?;
index.docs_words.clear(writer)?;
index.prefix_documents_cache.clear(writer)?;
index.prefix_postings_lists_cache.clear(writer)?;
index.updates.clear(update_writer)?;
index.updates_results.clear(update_writer)?;
Ok(())
}


@ -0,0 +1,47 @@
use std::borrow::Cow;
use heed::Result as ZResult;
use heed::types::ByteSlice;
use sdset::{Set, SetBuf};
use slice_group_by::GroupBy;
use crate::database::MainT;
use crate::DocIndex;
use crate::store::{Postings, PostingsCodec};
#[derive(Copy, Clone)]
pub struct PostingsLists {
pub(crate) postings_lists: heed::Database<ByteSlice, PostingsCodec>,
}
impl PostingsLists {
pub fn put_postings_list(
self,
writer: &mut heed::RwTxn<MainT>,
word: &[u8],
matches: &Set<DocIndex>,
) -> ZResult<()> {
let docids = matches.linear_group_by_key(|m| m.document_id).map(|g| g[0].document_id).collect();
let docids = Cow::Owned(SetBuf::new_unchecked(docids));
let matches = Cow::Borrowed(matches);
let postings = Postings { docids, matches };
self.postings_lists.put(writer, word, &postings)
}
pub fn del_postings_list(self, writer: &mut heed::RwTxn<MainT>, word: &[u8]) -> ZResult<bool> {
self.postings_lists.delete(writer, word)
}
pub fn clear(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.postings_lists.clear(writer)
}
pub fn postings_list<'txn>(
self,
reader: &'txn heed::RoTxn<MainT>,
word: &[u8],
) -> ZResult<Option<Postings<'txn>>> {
self.postings_lists.get(reader, word)
}
}
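A usage sketch, assuming an open `MainT` write transaction and a sorted `Set<DocIndex>`:
fn write_then_read_word(
writer: &mut heed::RwTxn<MainT>,
store: PostingsLists,
matches: &Set<DocIndex>,
) -> ZResult<()> {
// the docids half of the stored Postings is derived from `matches` on write
store.put_postings_list(writer, b"hello", matches)?;
// a write transaction can also serve reads through deref
assert!(store.postings_list(writer, b"hello")?.is_some());
Ok(())
}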


@ -0,0 +1,80 @@
use std::borrow::Cow;
use heed::types::{OwnedType, CowSlice};
use heed::Result as ZResult;
use zerocopy::{AsBytes, FromBytes};
use super::BEU64;
use crate::{DocumentId, Highlight};
use crate::database::MainT;
#[derive(Debug, Copy, Clone, AsBytes, FromBytes)]
#[repr(C)]
pub struct PrefixKey {
prefix: [u8; 4],
index: BEU64,
docid: BEU64,
}
impl PrefixKey {
pub fn new(prefix: [u8; 4], index: u64, docid: u64) -> PrefixKey {
PrefixKey {
prefix,
index: BEU64::new(index),
docid: BEU64::new(docid),
}
}
}
#[derive(Copy, Clone)]
pub struct PrefixDocumentsCache {
pub(crate) prefix_documents_cache: heed::Database<OwnedType<PrefixKey>, CowSlice<Highlight>>,
}
impl PrefixDocumentsCache {
pub fn put_prefix_document(
self,
writer: &mut heed::RwTxn<MainT>,
prefix: [u8; 4],
index: usize,
docid: DocumentId,
highlights: &[Highlight],
) -> ZResult<()> {
let key = PrefixKey::new(prefix, index as u64, docid.0);
self.prefix_documents_cache.put(writer, &key, highlights)
}
pub fn clear(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.prefix_documents_cache.clear(writer)
}
pub fn prefix_documents<'txn>(
self,
reader: &'txn heed::RoTxn<MainT>,
prefix: [u8; 4],
) -> ZResult<PrefixDocumentsIter<'txn>> {
let start = PrefixKey::new(prefix, 0, 0);
let end = PrefixKey::new(prefix, u64::max_value(), u64::max_value());
let iter = self.prefix_documents_cache.range(reader, &(start..=end))?;
Ok(PrefixDocumentsIter { iter })
}
}
pub struct PrefixDocumentsIter<'txn> {
iter: heed::RoRange<'txn, OwnedType<PrefixKey>, CowSlice<Highlight>>,
}
impl<'txn> Iterator for PrefixDocumentsIter<'txn> {
type Item = ZResult<(DocumentId, Cow<'txn, [Highlight]>)>;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
Some(Ok((key, highlights))) => {
let docid = DocumentId(key.docid.get());
Some(Ok((docid, highlights)))
}
Some(Err(e)) => Some(Err(e)),
None => None,
}
}
}


@ -0,0 +1,45 @@
use std::borrow::Cow;
use heed::Result as ZResult;
use heed::types::OwnedType;
use sdset::{Set, SetBuf};
use slice_group_by::GroupBy;
use crate::database::MainT;
use crate::DocIndex;
use crate::store::{PostingsCodec, Postings};
#[derive(Copy, Clone)]
pub struct PrefixPostingsListsCache {
pub(crate) prefix_postings_lists_cache: heed::Database<OwnedType<[u8; 4]>, PostingsCodec>,
}
impl PrefixPostingsListsCache {
pub fn put_prefix_postings_list(
self,
writer: &mut heed::RwTxn<MainT>,
prefix: [u8; 4],
matches: &Set<DocIndex>,
) -> ZResult<()>
{
let docids = matches.linear_group_by_key(|m| m.document_id).map(|g| g[0].document_id).collect();
let docids = Cow::Owned(SetBuf::new_unchecked(docids));
let matches = Cow::Borrowed(matches);
let postings = Postings { docids, matches };
self.prefix_postings_lists_cache.put(writer, &prefix, &postings)
}
pub fn clear(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.prefix_postings_lists_cache.clear(writer)
}
pub fn prefix_postings_list<'txn>(
self,
reader: &'txn heed::RoTxn<MainT>,
prefix: [u8; 4],
) -> ZResult<Option<Postings<'txn>>>
{
self.prefix_postings_lists_cache.get(reader, &prefix)
}
}
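Keys here are fixed four-byte arrays, so shorter prefixes must be zero-padded before lookup, the same convention `compute_short_prefixes` uses when filling the cache; a sketch assuming an open read transaction:
fn lookup_short_prefix<'txn>(
reader: &'txn heed::RoTxn<MainT>,
cache: PrefixPostingsListsCache,
) -> ZResult<Option<Postings<'txn>>> {
let prefix = "ca";
// pad the two-byte prefix into the fixed [u8; 4] key: [b'c', b'a', 0, 0]
let mut key = [0u8; 4];
key[..prefix.len()].copy_from_slice(prefix.as_bytes());
cache.prefix_postings_list(reader, key)
}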


@ -0,0 +1,41 @@
use heed::types::ByteSlice;
use crate::database::MainT;
use heed::Result as ZResult;
use std::sync::Arc;
#[derive(Copy, Clone)]
pub struct Synonyms {
pub(crate) synonyms: heed::Database<ByteSlice, ByteSlice>,
}
impl Synonyms {
pub fn put_synonyms(
self,
writer: &mut heed::RwTxn<MainT>,
word: &[u8],
synonyms: &fst::Set,
) -> ZResult<()> {
let bytes = synonyms.as_fst().as_bytes();
self.synonyms.put(writer, word, bytes)
}
pub fn del_synonyms(self, writer: &mut heed::RwTxn<MainT>, word: &[u8]) -> ZResult<bool> {
self.synonyms.delete(writer, word)
}
pub fn clear(self, writer: &mut heed::RwTxn<MainT>) -> ZResult<()> {
self.synonyms.clear(writer)
}
pub fn synonyms(self, reader: &heed::RoTxn<MainT>, word: &[u8]) -> ZResult<Option<fst::Set>> {
match self.synonyms.get(reader, word)? {
Some(bytes) => {
let len = bytes.len();
let bytes = Arc::new(bytes.to_owned());
let fst = fst::raw::Fst::from_shared_bytes(bytes, 0, len).unwrap();
Ok(Some(fst::Set::from(fst)))
}
None => Ok(None),
}
}
}


@ -0,0 +1,65 @@
use super::BEU64;
use crate::database::UpdateT;
use crate::update::Update;
use heed::types::{OwnedType, SerdeJson};
use heed::Result as ZResult;
#[derive(Copy, Clone)]
pub struct Updates {
pub(crate) updates: heed::Database<OwnedType<BEU64>, SerdeJson<Update>>,
}
impl Updates {
// TODO do not trigger deserialize if possible
pub fn last_update(self, reader: &heed::RoTxn<UpdateT>) -> ZResult<Option<(u64, Update)>> {
match self.updates.last(reader)? {
Some((key, data)) => Ok(Some((key.get(), data))),
None => Ok(None),
}
}
// TODO do not trigger deserialize if possible
pub fn first_update(self, reader: &heed::RoTxn<UpdateT>) -> ZResult<Option<(u64, Update)>> {
match self.updates.first(reader)? {
Some((key, data)) => Ok(Some((key.get(), data))),
None => Ok(None),
}
}
// TODO do not trigger deserialize if possible
pub fn get(self, reader: &heed::RoTxn<UpdateT>, update_id: u64) -> ZResult<Option<Update>> {
let update_id = BEU64::new(update_id);
self.updates.get(reader, &update_id)
}
pub fn put_update(
self,
writer: &mut heed::RwTxn<UpdateT>,
update_id: u64,
update: &Update,
) -> ZResult<()> {
// TODO prefer using serde_json?
let update_id = BEU64::new(update_id);
self.updates.put(writer, &update_id, update)
}
pub fn del_update(self, writer: &mut heed::RwTxn<UpdateT>, update_id: u64) -> ZResult<bool> {
let update_id = BEU64::new(update_id);
self.updates.delete(writer, &update_id)
}
pub fn pop_front(self, writer: &mut heed::RwTxn<UpdateT>) -> ZResult<Option<(u64, Update)>> {
match self.first_update(writer)? {
Some((update_id, update)) => {
let key = BEU64::new(update_id);
self.updates.delete(writer, &key)?;
Ok(Some((update_id, update)))
}
None => Ok(None),
}
}
pub fn clear(self, writer: &mut heed::RwTxn<UpdateT>) -> ZResult<()> {
self.updates.clear(writer)
}
}
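A sketch of the FIFO semantics, assuming an open `UpdateT` write transaction:
fn drain_update_queue(writer: &mut heed::RwTxn<UpdateT>, updates: Updates) -> ZResult<()> {
// pop_front repeatedly removes and returns the oldest enqueued update
while let Some((update_id, _update)) = updates.pop_front(writer)? {
// a real consumer would hand the update to the processing task here
let _ = update_id;
}
Ok(())
}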


@ -0,0 +1,45 @@
use super::BEU64;
use crate::database::UpdateT;
use crate::update::ProcessedUpdateResult;
use heed::types::{OwnedType, SerdeJson};
use heed::Result as ZResult;
#[derive(Copy, Clone)]
pub struct UpdatesResults {
pub(crate) updates_results: heed::Database<OwnedType<BEU64>, SerdeJson<ProcessedUpdateResult>>,
}
impl UpdatesResults {
pub fn last_update(
self,
reader: &heed::RoTxn<UpdateT>,
) -> ZResult<Option<(u64, ProcessedUpdateResult)>> {
match self.updates_results.last(reader)? {
Some((key, data)) => Ok(Some((key.get(), data))),
None => Ok(None),
}
}
pub fn put_update_result(
self,
writer: &mut heed::RwTxn<UpdateT>,
update_id: u64,
update_result: &ProcessedUpdateResult,
) -> ZResult<()> {
let update_id = BEU64::new(update_id);
self.updates_results.put(writer, &update_id, update_result)
}
pub fn update_result(
self,
reader: &heed::RoTxn<UpdateT>,
update_id: u64,
) -> ZResult<Option<ProcessedUpdateResult>> {
let update_id = BEU64::new(update_id);
self.updates_results.get(reader, &update_id)
}
pub fn clear(self, writer: &mut heed::RwTxn<UpdateT>) -> ZResult<()> {
self.updates_results.clear(writer)
}
}


@ -0,0 +1,32 @@
use crate::database::{MainT, UpdateT};
use crate::update::{next_update_id, Update};
use crate::{store, MResult, RankedMap};
pub fn apply_clear_all(
writer: &mut heed::RwTxn<MainT>,
index: &store::Index,
) -> MResult<()> {
index.main.put_words_fst(writer, &fst::Set::default())?;
index.main.put_ranked_map(writer, &RankedMap::default())?;
index.main.put_number_of_documents(writer, |_| 0)?;
index.documents_fields.clear(writer)?;
index.documents_fields_counts.clear(writer)?;
index.postings_lists.clear(writer)?;
index.docs_words.clear(writer)?;
index.prefix_documents_cache.clear(writer)?;
index.prefix_postings_lists_cache.clear(writer)?;
Ok(())
}
pub fn push_clear_all(
writer: &mut heed::RwTxn<UpdateT>,
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
) -> MResult<u64> {
let last_update_id = next_update_id(writer, updates_store, updates_results_store)?;
let update = Update::clear_all();
updates_store.put_update(writer, last_update_id, &update)?;
Ok(last_update_id)
}


@ -0,0 +1,27 @@
use heed::Result as ZResult;
use crate::database::{MainT, UpdateT};
use crate::store;
use crate::update::{next_update_id, Update};
pub fn apply_customs_update(
writer: &mut heed::RwTxn<MainT>,
main_store: store::Main,
customs: &[u8],
) -> ZResult<()> {
main_store.put_customs(writer, customs)
}
pub fn push_customs_update(
writer: &mut heed::RwTxn<UpdateT>,
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
customs: Vec<u8>,
) -> ZResult<u64> {
let last_update_id = next_update_id(writer, updates_store, updates_results_store)?;
let update = Update::customs(customs);
updates_store.put_update(writer, last_update_id, &update)?;
Ok(last_update_id)
}


@ -0,0 +1,382 @@
use std::collections::HashMap;
use fst::{set::OpBuilder, SetBuilder};
use indexmap::IndexMap;
use sdset::{duo::Union, SetOperation};
use serde::{Deserialize, Serialize};
use crate::database::{MainT, UpdateT};
use crate::database::{UpdateEvent, UpdateEventsEmitter};
use crate::raw_indexer::RawIndexer;
use crate::serde::{extract_document_id, serialize_value_with_id, Deserializer, Serializer};
use crate::store;
use crate::update::{apply_documents_deletion, compute_short_prefixes, next_update_id, Update};
use crate::{Error, MResult, RankedMap};
pub struct DocumentsAddition<D> {
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
updates_notifier: UpdateEventsEmitter,
documents: Vec<D>,
is_partial: bool,
}
impl<D> DocumentsAddition<D> {
pub fn new(
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
updates_notifier: UpdateEventsEmitter,
) -> DocumentsAddition<D> {
DocumentsAddition {
updates_store,
updates_results_store,
updates_notifier,
documents: Vec::new(),
is_partial: false,
}
}
pub fn new_partial(
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
updates_notifier: UpdateEventsEmitter,
) -> DocumentsAddition<D> {
DocumentsAddition {
updates_store,
updates_results_store,
updates_notifier,
documents: Vec::new(),
is_partial: true,
}
}
pub fn update_document(&mut self, document: D) {
self.documents.push(document);
}
pub fn finalize(self, writer: &mut heed::RwTxn<UpdateT>) -> MResult<u64>
where
D: serde::Serialize,
{
let _ = self.updates_notifier.send(UpdateEvent::NewUpdate);
let update_id = push_documents_addition(
writer,
self.updates_store,
self.updates_results_store,
self.documents,
self.is_partial,
)?;
Ok(update_id)
}
}
impl<D> Extend<D> for DocumentsAddition<D> {
fn extend<T: IntoIterator<Item = D>>(&mut self, iter: T) {
self.documents.extend(iter)
}
}
pub fn push_documents_addition<D: serde::Serialize>(
writer: &mut heed::RwTxn<UpdateT>,
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
addition: Vec<D>,
is_partial: bool,
) -> MResult<u64> {
let mut values = Vec::with_capacity(addition.len());
for add in addition {
let vec = serde_json::to_vec(&add)?;
let add = serde_json::from_slice(&vec)?;
values.push(add);
}
let last_update_id = next_update_id(writer, updates_store, updates_results_store)?;
let update = if is_partial {
Update::documents_partial(values)
} else {
Update::documents_addition(values)
};
updates_store.put_update(writer, last_update_id, &update)?;
Ok(last_update_id)
}
pub fn apply_documents_addition<'a, 'b>(
writer: &'a mut heed::RwTxn<'b, MainT>,
index: &store::Index,
addition: Vec<IndexMap<String, serde_json::Value>>,
) -> MResult<()> {
let mut documents_additions = HashMap::new();
let mut schema = match index.main.schema(writer)? {
Some(schema) => schema,
None => return Err(Error::SchemaMissing),
};
let primary_key = schema.primary_key().ok_or(Error::MissingPrimaryKey)?;
// 1. store documents ids for future deletion
for document in addition {
let document_id = match extract_document_id(&primary_key, &document)? {
Some(id) => id,
None => return Err(Error::MissingDocumentId),
};
documents_additions.insert(document_id, document);
}
// 2. remove the documents posting lists
let number_of_inserted_documents = documents_additions.len();
let documents_ids = documents_additions.iter().map(|(id, _)| *id).collect();
apply_documents_deletion(writer, index, documents_ids)?;
let mut ranked_map = match index.main.ranked_map(writer)? {
Some(ranked_map) => ranked_map,
None => RankedMap::default(),
};
let stop_words = match index.main.stop_words_fst(writer)? {
Some(stop_words) => stop_words,
None => fst::Set::default(),
};
// 3. index the documents fields in the stores
let mut indexer = RawIndexer::new(stop_words);
for (document_id, document) in documents_additions {
let serializer = Serializer {
txn: writer,
schema: &mut schema,
document_store: index.documents_fields,
document_fields_counts: index.documents_fields_counts,
indexer: &mut indexer,
ranked_map: &mut ranked_map,
document_id,
};
document.serialize(serializer)?;
}
write_documents_addition_index(
writer,
index,
&ranked_map,
number_of_inserted_documents,
indexer,
)?;
index.main.put_schema(writer, &schema)?;
Ok(())
}
pub fn apply_documents_partial_addition<'a, 'b>(
writer: &'a mut heed::RwTxn<'b, MainT>,
index: &store::Index,
addition: Vec<IndexMap<String, serde_json::Value>>,
) -> MResult<()> {
let mut documents_additions = HashMap::new();
let mut schema = match index.main.schema(writer)? {
Some(schema) => schema,
None => return Err(Error::SchemaMissing),
};
let primary_key = schema.primary_key().ok_or(Error::MissingPrimaryKey)?;
// 1. store documents ids for future deletion
for mut document in addition {
let document_id = match extract_document_id(&primary_key, &document)? {
Some(id) => id,
None => return Err(Error::MissingDocumentId),
};
let mut deserializer = Deserializer {
document_id,
reader: writer,
documents_fields: index.documents_fields,
schema: &schema,
fields: None,
};
// retrieve the old document and
// update the new one with missing keys found in the old one
let result = Option::<HashMap<String, serde_json::Value>>::deserialize(&mut deserializer)?;
if let Some(old_document) = result {
for (key, value) in old_document {
document.entry(key).or_insert(value);
}
}
documents_additions.insert(document_id, document);
}
// 2. remove the documents posting lists
let number_of_inserted_documents = documents_additions.len();
let documents_ids = documents_additions.iter().map(|(id, _)| *id).collect();
apply_documents_deletion(writer, index, documents_ids)?;
let mut ranked_map = match index.main.ranked_map(writer)? {
Some(ranked_map) => ranked_map,
None => RankedMap::default(),
};
let stop_words = match index.main.stop_words_fst(writer)? {
Some(stop_words) => stop_words,
None => fst::Set::default(),
};
// 3. index the documents fields in the stores
let mut indexer = RawIndexer::new(stop_words);
for (document_id, document) in documents_additions {
let serializer = Serializer {
txn: writer,
schema: &mut schema,
document_store: index.documents_fields,
document_fields_counts: index.documents_fields_counts,
indexer: &mut indexer,
ranked_map: &mut ranked_map,
document_id,
};
document.serialize(serializer)?;
}
write_documents_addition_index(
writer,
index,
&ranked_map,
number_of_inserted_documents,
indexer,
)?;
index.main.put_schema(writer, &schema)?;
Ok(())
}
pub fn reindex_all_documents(writer: &mut heed::RwTxn<MainT>, index: &store::Index) -> MResult<()> {
let schema = match index.main.schema(writer)? {
Some(schema) => schema,
None => return Err(Error::SchemaMissing),
};
let mut ranked_map = RankedMap::default();
// 1. retrieve all documents ids
let mut documents_ids_to_reindex = Vec::new();
for result in index.documents_fields_counts.documents_ids(writer)? {
let document_id = result?;
documents_ids_to_reindex.push(document_id);
}
// 2. remove the documents posting lists
index.main.put_words_fst(writer, &fst::Set::default())?;
index.main.put_ranked_map(writer, &ranked_map)?;
index.main.put_number_of_documents(writer, |_| 0)?;
index.postings_lists.clear(writer)?;
index.docs_words.clear(writer)?;
let stop_words = match index.main.stop_words_fst(writer)? {
Some(stop_words) => stop_words,
None => fst::Set::default(),
};
let number_of_inserted_documents = documents_ids_to_reindex.len();
let mut indexer = RawIndexer::new(stop_words);
let mut ram_store = HashMap::new();
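// 3. re-index the documents fields in the stores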
for document_id in documents_ids_to_reindex {
for result in index.documents_fields.document_fields(writer, document_id)? {
let (field_id, bytes) = result?;
let value: serde_json::Value = serde_json::from_slice(bytes)?;
ram_store.insert((document_id, field_id), value);
}
for ((docid, field_id), value) in ram_store.drain() {
serialize_value_with_id(
writer,
field_id,
&schema,
docid,
index.documents_fields,
index.documents_fields_counts,
&mut indexer,
&mut ranked_map,
&value
)?;
}
}
// 4. write the new index in the main store
write_documents_addition_index(
writer,
index,
&ranked_map,
number_of_inserted_documents,
indexer,
)?;
index.main.put_schema(writer, &schema)?;
Ok(())
}
pub fn write_documents_addition_index(
writer: &mut heed::RwTxn<MainT>,
index: &store::Index,
ranked_map: &RankedMap,
number_of_inserted_documents: usize,
indexer: RawIndexer,
) -> MResult<()> {
let indexed = indexer.build();
let mut delta_words_builder = SetBuilder::memory();
for (word, delta_set) in indexed.words_doc_indexes {
delta_words_builder.insert(&word).unwrap();
let set = match index.postings_lists.postings_list(writer, &word)? {
Some(postings) => Union::new(&postings.matches, &delta_set).into_set_buf(),
None => delta_set,
};
index.postings_lists.put_postings_list(writer, &word, &set)?;
}
for (id, words) in indexed.docs_words {
index.docs_words.put_doc_words(writer, id, &words)?;
}
let delta_words = delta_words_builder
.into_inner()
.and_then(fst::Set::from_bytes)
.unwrap();
let words = match index.main.words_fst(writer)? {
Some(words) => {
let op = OpBuilder::new()
.add(words.stream())
.add(delta_words.stream())
.r#union();
let mut words_builder = SetBuilder::memory();
words_builder.extend_stream(op).unwrap();
words_builder
.into_inner()
.and_then(fst::Set::from_bytes)
.unwrap()
}
None => delta_words,
};
index.main.put_words_fst(writer, &words)?;
index.main.put_ranked_map(writer, ranked_map)?;
index.main.put_number_of_documents(writer, |old| old + number_of_inserted_documents as u64)?;
compute_short_prefixes(writer, index)?;
Ok(())
}
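A hypothetical enqueue from the caller's side, assuming an `Index` and an open `UpdateT` write transaction; the document content is made up, and the addition is only applied later when the update queue is processed:
fn enqueue_addition(
index: &store::Index,
update_writer: &mut heed::RwTxn<UpdateT>,
) -> MResult<u64> {
let mut addition = index.documents_addition();
addition.update_document(serde_json::json!({ "id": 1, "title": "Carol" }));
// returns the update id; poll update_status with it to track progress
addition.finalize(update_writer)
}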


@ -0,0 +1,180 @@
use std::collections::{BTreeSet, HashMap, HashSet};
use fst::{SetBuilder, Streamer};
use meilisearch_schema::Schema;
use sdset::{duo::DifferenceByKey, SetBuf, SetOperation};
use crate::database::{MainT, UpdateT};
use crate::database::{UpdateEvent, UpdateEventsEmitter};
use crate::serde::extract_document_id;
use crate::store;
use crate::update::{next_update_id, compute_short_prefixes, Update};
use crate::{DocumentId, Error, MResult, RankedMap};
pub struct DocumentsDeletion {
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
updates_notifier: UpdateEventsEmitter,
documents: Vec<DocumentId>,
}
impl DocumentsDeletion {
pub fn new(
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
updates_notifier: UpdateEventsEmitter,
) -> DocumentsDeletion {
DocumentsDeletion {
updates_store,
updates_results_store,
updates_notifier,
documents: Vec::new(),
}
}
pub fn delete_document_by_id(&mut self, document_id: DocumentId) {
self.documents.push(document_id);
}
pub fn delete_document<D>(&mut self, schema: &Schema, document: D) -> MResult<()>
where
D: serde::Serialize,
{
let primary_key = schema.primary_key().ok_or(Error::MissingPrimaryKey)?;
let document_id = match extract_document_id(&primary_key, &document)? {
Some(id) => id,
None => return Err(Error::MissingDocumentId),
};
self.delete_document_by_id(document_id);
Ok(())
}
pub fn finalize(self, writer: &mut heed::RwTxn<UpdateT>) -> MResult<u64> {
let _ = self.updates_notifier.send(UpdateEvent::NewUpdate);
let update_id = push_documents_deletion(
writer,
self.updates_store,
self.updates_results_store,
self.documents,
)?;
Ok(update_id)
}
}
impl Extend<DocumentId> for DocumentsDeletion {
fn extend<T: IntoIterator<Item = DocumentId>>(&mut self, iter: T) {
self.documents.extend(iter)
}
}
pub fn push_documents_deletion(
writer: &mut heed::RwTxn<UpdateT>,
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
deletion: Vec<DocumentId>,
) -> MResult<u64> {
let last_update_id = next_update_id(writer, updates_store, updates_results_store)?;
let update = Update::documents_deletion(deletion);
updates_store.put_update(writer, last_update_id, &update)?;
Ok(last_update_id)
}
pub fn apply_documents_deletion(
writer: &mut heed::RwTxn<MainT>,
index: &store::Index,
deletion: Vec<DocumentId>,
) -> MResult<()> {
let idset = SetBuf::from_dirty(deletion);
let schema = match index.main.schema(writer)? {
Some(schema) => schema,
None => return Err(Error::SchemaMissing),
};
let mut ranked_map = match index.main.ranked_map(writer)? {
Some(ranked_map) => ranked_map,
None => RankedMap::default(),
};
// collect the ranked attributes according to the schema
let ranked_fields = schema.ranked();
let mut words_document_ids = HashMap::new();
for id in idset {
// remove all the ranked attributes from the ranked_map
for ranked_attr in ranked_fields {
ranked_map.remove(id, *ranked_attr);
}
if let Some(words) = index.docs_words.doc_words(writer, id)? {
let mut stream = words.stream();
while let Some(word) = stream.next() {
let word = word.to_vec();
words_document_ids
.entry(word)
.or_insert_with(Vec::new)
.push(id);
}
}
}
let mut deleted_documents = HashSet::new();
let mut removed_words = BTreeSet::new();
for (word, document_ids) in words_document_ids {
let document_ids = SetBuf::from_dirty(document_ids);
if let Some(postings) = index.postings_lists.postings_list(writer, &word)? {
let op = DifferenceByKey::new(&postings.matches, &document_ids, |d| d.document_id, |id| *id);
let doc_indexes = op.into_set_buf();
if !doc_indexes.is_empty() {
index.postings_lists.put_postings_list(writer, &word, &doc_indexes)?;
} else {
index.postings_lists.del_postings_list(writer, &word)?;
removed_words.insert(word);
}
}
for id in document_ids {
index.documents_fields_counts.del_all_document_fields_counts(writer, id)?;
if index.documents_fields.del_all_document_fields(writer, id)? != 0 {
deleted_documents.insert(id);
}
}
}
let deleted_documents_len = deleted_documents.len() as u64;
for id in deleted_documents {
index.docs_words.del_doc_words(writer, id)?;
}
let removed_words = fst::Set::from_iter(removed_words).unwrap();
let words = match index.main.words_fst(writer)? {
Some(words_set) => {
let op = fst::set::OpBuilder::new()
.add(words_set.stream())
.add(removed_words.stream())
.difference();
let mut words_builder = SetBuilder::memory();
words_builder.extend_stream(op).unwrap();
words_builder
.into_inner()
.and_then(fst::Set::from_bytes)
.unwrap()
}
None => fst::Set::default(),
};
index.main.put_words_fst(writer, &words)?;
index.main.put_ranked_map(writer, &ranked_map)?;
index.main.put_number_of_documents(writer, |old| old - deleted_documents_len)?;
compute_short_prefixes(writer, index)?;
Ok(())
}
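The deletion path mirrors the addition path; a sketch under the same assumptions, with a hypothetical document id:
fn enqueue_deletion(
index: &store::Index,
update_writer: &mut heed::RwTxn<UpdateT>,
) -> MResult<u64> {
let mut deletion = index.documents_deletion();
deletion.delete_document_by_id(DocumentId(1));
deletion.finalize(update_writer)
}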


@ -0,0 +1,361 @@
mod clear_all;
mod customs_update;
mod documents_addition;
mod documents_deletion;
mod settings_update;
pub use self::clear_all::{apply_clear_all, push_clear_all};
pub use self::customs_update::{apply_customs_update, push_customs_update};
pub use self::documents_addition::{
apply_documents_addition, apply_documents_partial_addition, DocumentsAddition,
};
pub use self::documents_deletion::{apply_documents_deletion, DocumentsDeletion};
pub use self::settings_update::{apply_settings_update, push_settings_update};
use std::cmp;
use std::time::Instant;
use chrono::{DateTime, Utc};
use fst::{IntoStreamer, Streamer};
use heed::Result as ZResult;
use indexmap::IndexMap;
use log::debug;
use sdset::Set;
use serde::{Deserialize, Serialize};
use crate::{store, DocumentId, MResult};
use crate::database::{MainT, UpdateT};
use crate::settings::SettingsUpdate;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Update {
data: UpdateData,
enqueued_at: DateTime<Utc>,
}
impl Update {
fn clear_all() -> Update {
Update {
data: UpdateData::ClearAll,
enqueued_at: Utc::now(),
}
}
fn customs(data: Vec<u8>) -> Update {
Update {
data: UpdateData::Customs(data),
enqueued_at: Utc::now(),
}
}
fn documents_addition(data: Vec<IndexMap<String, serde_json::Value>>) -> Update {
Update {
data: UpdateData::DocumentsAddition(data),
enqueued_at: Utc::now(),
}
}
fn documents_partial(data: Vec<IndexMap<String, serde_json::Value>>) -> Update {
Update {
data: UpdateData::DocumentsPartial(data),
enqueued_at: Utc::now(),
}
}
fn documents_deletion(data: Vec<DocumentId>) -> Update {
Update {
data: UpdateData::DocumentsDeletion(data),
enqueued_at: Utc::now(),
}
}
fn settings(data: SettingsUpdate) -> Update {
Update {
data: UpdateData::Settings(data),
enqueued_at: Utc::now(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UpdateData {
ClearAll,
Customs(Vec<u8>),
DocumentsAddition(Vec<IndexMap<String, serde_json::Value>>),
DocumentsPartial(Vec<IndexMap<String, serde_json::Value>>),
DocumentsDeletion(Vec<DocumentId>),
Settings(SettingsUpdate)
}
impl UpdateData {
pub fn update_type(&self) -> UpdateType {
match self {
UpdateData::ClearAll => UpdateType::ClearAll,
UpdateData::Customs(_) => UpdateType::Customs,
UpdateData::DocumentsAddition(addition) => UpdateType::DocumentsAddition {
number: addition.len(),
},
UpdateData::DocumentsPartial(addition) => UpdateType::DocumentsPartial {
number: addition.len(),
},
UpdateData::DocumentsDeletion(deletion) => UpdateType::DocumentsDeletion {
number: deletion.len(),
},
UpdateData::Settings(update) => UpdateType::Settings {
settings: update.clone(),
},
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "name")]
pub enum UpdateType {
ClearAll,
Customs,
DocumentsAddition { number: usize },
DocumentsPartial { number: usize },
DocumentsDeletion { number: usize },
Settings { settings: SettingsUpdate },
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProcessedUpdateResult {
pub update_id: u64,
#[serde(rename = "type")]
pub update_type: UpdateType,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
pub duration: f64, // in seconds
pub enqueued_at: DateTime<Utc>,
pub processed_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EnqueuedUpdateResult {
pub update_id: u64,
#[serde(rename = "type")]
pub update_type: UpdateType,
pub enqueued_at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", tag = "status")]
pub enum UpdateStatus {
Enqueued {
#[serde(flatten)]
content: EnqueuedUpdateResult,
},
Failed {
#[serde(flatten)]
content: ProcessedUpdateResult,
},
Processed {
#[serde(flatten)]
content: ProcessedUpdateResult,
},
}
pub fn update_status(
update_reader: &heed::RoTxn<UpdateT>,
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
update_id: u64,
) -> MResult<Option<UpdateStatus>> {
match updates_results_store.update_result(update_reader, update_id)? {
Some(result) => {
if result.error.is_some() {
Ok(Some(UpdateStatus::Failed { content: result }))
} else {
Ok(Some(UpdateStatus::Processed { content: result }))
}
},
None => match updates_store.get(update_reader, update_id)? {
Some(update) => Ok(Some(UpdateStatus::Enqueued {
content: EnqueuedUpdateResult {
update_id,
update_type: update.data.update_type(),
enqueued_at: update.enqueued_at,
},
})),
None => Ok(None),
},
}
}
pub fn next_update_id(
update_writer: &mut heed::RwTxn<UpdateT>,
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
) -> ZResult<u64> {
let last_update = updates_store.last_update(update_writer)?;
let last_update = last_update.map(|(n, _)| n);
let last_update_results_id = updates_results_store.last_update(update_writer)?;
let last_update_results_id = last_update_results_id.map(|(n, _)| n);
let max_update_id = cmp::max(last_update, last_update_results_id);
let new_update_id = max_update_id.map_or(0, |n| n + 1);
Ok(new_update_id)
}
pub fn update_task<'a, 'b>(
writer: &'a mut heed::RwTxn<'b, MainT>,
index: &store::Index,
update_id: u64,
update: Update,
) -> MResult<ProcessedUpdateResult> {
debug!("Processing update number {}", update_id);
let Update { enqueued_at, data } = update;
let (update_type, result, duration) = match data {
UpdateData::ClearAll => {
let start = Instant::now();
let update_type = UpdateType::ClearAll;
let result = apply_clear_all(writer, index);
(update_type, result, start.elapsed())
}
UpdateData::Customs(customs) => {
let start = Instant::now();
let update_type = UpdateType::Customs;
let result = apply_customs_update(writer, index.main, &customs).map_err(Into::into);
(update_type, result, start.elapsed())
}
UpdateData::DocumentsAddition(documents) => {
let start = Instant::now();
let update_type = UpdateType::DocumentsAddition {
number: documents.len(),
};
let result = apply_documents_addition(writer, index, documents);
(update_type, result, start.elapsed())
}
UpdateData::DocumentsPartial(documents) => {
let start = Instant::now();
let update_type = UpdateType::DocumentsPartial {
number: documents.len(),
};
let result = apply_documents_partial_addition(writer, index, documents);
(update_type, result, start.elapsed())
}
UpdateData::DocumentsDeletion(documents) => {
let start = Instant::now();
let update_type = UpdateType::DocumentsDeletion {
number: documents.len(),
};
let result = apply_documents_deletion(writer, index, documents);
(update_type, result, start.elapsed())
}
UpdateData::Settings(settings) => {
let start = Instant::now();
let update_type = UpdateType::Settings {
settings: settings.clone(),
};
let result = apply_settings_update(
writer,
index,
settings,
);
(update_type, result, start.elapsed())
}
};
debug!(
"Processed update number {} {:?} {:?}",
update_id, update_type, result
);
let status = ProcessedUpdateResult {
update_id,
update_type,
error: result.map_err(|e| e.to_string()).err(),
duration: duration.as_secs_f64(),
enqueued_at,
processed_at: Utc::now(),
};
Ok(status)
}
fn compute_short_prefixes(writer: &mut heed::RwTxn<MainT>, index: &store::Index) -> MResult<()> {
// retrieve the words fst to compute all those prefixes
let words_fst = match index.main.words_fst(writer)? {
Some(fst) => fst,
None => return Ok(()),
};
// clear the prefixes
let pplc_store = index.prefix_postings_lists_cache;
pplc_store.clear(writer)?;
for prefix_len in 1..=2 {
// compute prefixes and store those in the PrefixPostingsListsCache store.
let mut previous_prefix: Option<([u8; 4], Vec<_>)> = None;
let mut stream = words_fst.into_stream();
while let Some(input) = stream.next() {
// We skip the prefixes that are shorter than the current length
// we want to cache (<). We must ignore the input when it is exactly the
// same word as the prefix because if we match exactly on it we need
// to consider it as an exact match and not as a prefix (=).
if input.len() <= prefix_len { continue }
if let Some(postings_list) = index.postings_lists.postings_list(writer, input)?.map(|p| p.matches.into_owned()) {
let prefix = &input[..prefix_len];
let mut arr_prefix = [0; 4];
arr_prefix[..prefix_len].copy_from_slice(prefix);
match previous_prefix {
Some((ref mut prev_prefix, ref mut prev_pl)) if *prev_prefix != arr_prefix => {
prev_pl.sort_unstable();
prev_pl.dedup();
if let Ok(prefix) = std::str::from_utf8(&prev_prefix[..prefix_len]) {
debug!("writing the prefix of {:?} of length {}", prefix, prev_pl.len());
}
let pls = Set::new_unchecked(&prev_pl);
pplc_store.put_prefix_postings_list(writer, *prev_prefix, &pls)?;
*prev_prefix = arr_prefix;
prev_pl.clear();
prev_pl.extend_from_slice(&postings_list);
},
Some((_, ref mut prev_pl)) => prev_pl.extend_from_slice(&postings_list),
None => previous_prefix = Some((arr_prefix, postings_list.to_vec())),
}
}
}
// write the last prefix postings lists
if let Some((prev_prefix, mut prev_pl)) = previous_prefix.take() {
prev_pl.sort_unstable();
prev_pl.dedup();
let pls = Set::new_unchecked(&prev_pl);
pplc_store.put_prefix_postings_list(writer, prev_prefix, &pls)?;
}
}
Ok(())
}


@ -0,0 +1,301 @@
use std::collections::{BTreeMap, BTreeSet};
use heed::Result as ZResult;
use fst::{set::OpBuilder, SetBuilder};
use sdset::SetBuf;
use meilisearch_schema::Schema;
use crate::database::{MainT, UpdateT};
use crate::settings::{UpdateState, SettingsUpdate, RankingRule};
use crate::update::documents_addition::reindex_all_documents;
use crate::update::{next_update_id, Update};
use crate::{store, MResult, Error};
pub fn push_settings_update(
writer: &mut heed::RwTxn<UpdateT>,
updates_store: store::Updates,
updates_results_store: store::UpdatesResults,
settings: SettingsUpdate,
) -> ZResult<u64> {
let last_update_id = next_update_id(writer, updates_store, updates_results_store)?;
let update = Update::settings(settings);
updates_store.put_update(writer, last_update_id, &update)?;
Ok(last_update_id)
}
pub fn apply_settings_update(
writer: &mut heed::RwTxn<MainT>,
index: &store::Index,
settings: SettingsUpdate,
) -> MResult<()> {
let mut must_reindex = false;
let mut schema = match index.main.schema(writer)? {
Some(schema) => schema,
None => {
match settings.primary_key.clone() {
UpdateState::Update(id) => Schema::with_primary_key(&id),
_ => return Err(Error::MissingPrimaryKey)
}
}
};
match settings.ranking_rules {
UpdateState::Update(v) => {
let ranked_field: Vec<&str> = v.iter().filter_map(RankingRule::field).collect();
schema.update_ranked(&ranked_field)?;
for name in ranked_field {
if schema.accept_new_fields() {
schema.set_indexed(name.as_ref())?;
schema.set_displayed(name.as_ref())?;
}
}
index.main.put_ranking_rules(writer, &v)?;
must_reindex = true;
},
UpdateState::Clear => {
index.main.delete_ranking_rules(writer)?;
schema.clear_ranked();
must_reindex = true;
},
UpdateState::Nothing => (),
}
match settings.distinct_attribute {
UpdateState::Update(v) => {
index.main.put_distinct_attribute(writer, &v)?;
},
UpdateState::Clear => {
index.main.delete_distinct_attribute(writer)?;
},
UpdateState::Nothing => (),
}
match settings.accept_new_fields {
UpdateState::Update(v) => {
schema.set_accept_new_fields(v);
},
UpdateState::Clear => {
schema.set_accept_new_fields(true);
},
UpdateState::Nothing => (),
}
match settings.searchable_attributes.clone() {
UpdateState::Update(v) => {
schema.update_indexed(v)?;
must_reindex = true;
},
UpdateState::Clear => {
schema.set_all_fields_as_indexed();
must_reindex = true;
},
UpdateState::Nothing => (),
}
match settings.displayed_attributes.clone() {
UpdateState::Update(v) => schema.update_displayed(v)?,
UpdateState::Clear => {
schema.set_all_fields_as_displayed();
},
UpdateState::Nothing => (),
}
index.main.put_schema(writer, &schema)?;
match settings.stop_words {
UpdateState::Update(stop_words) => {
if apply_stop_words_update(writer, index, stop_words)? {
must_reindex = true;
}
},
UpdateState::Clear => {
if apply_stop_words_update(writer, index, BTreeSet::new())? {
must_reindex = true;
}
},
UpdateState::Nothing => (),
}
match settings.synonyms {
UpdateState::Update(synonyms) => apply_synonyms_update(writer, index, synonyms)?,
UpdateState::Clear => apply_synonyms_update(writer, index, BTreeMap::new())?,
UpdateState::Nothing => (),
}
if must_reindex {
reindex_all_documents(writer, index)?;
}
Ok(())
}
pub fn apply_stop_words_update(
writer: &mut heed::RwTxn<MainT>,
index: &store::Index,
stop_words: BTreeSet<String>,
) -> MResult<bool>
{
let mut must_reindex = false;
let old_stop_words: BTreeSet<String> = index.main
.stop_words_fst(writer)?
.unwrap_or_default()
.stream()
.into_strs()?
.into_iter()
.collect();
let deletion: BTreeSet<String> = old_stop_words.difference(&stop_words).cloned().collect();
let addition: BTreeSet<String> = stop_words.difference(&old_stop_words).cloned().collect();
if !addition.is_empty() {
apply_stop_words_addition(writer, index, addition)?;
}
if !deletion.is_empty() {
must_reindex = true;
apply_stop_words_deletion(writer, index, deletion)?;
}
if let Some(words_fst) = index.main.words_fst(writer)? {
let stop_words = fst::Set::from_iter(stop_words)?;
let op = OpBuilder::new()
.add(&words_fst)
.add(&stop_words)
.difference();
let mut builder = fst::SetBuilder::memory();
builder.extend_stream(op)?;
let words_fst = builder.into_inner().and_then(fst::Set::from_bytes)?;
index.main.put_words_fst(writer, &words_fst)?;
index.main.put_stop_words_fst(writer, &stop_words)?;
}
Ok(must_reindex)
}
fn apply_stop_words_addition(
writer: &mut heed::RwTxn<MainT>,
index: &store::Index,
addition: BTreeSet<String>,
) -> MResult<()>
{
let main_store = index.main;
let postings_lists_store = index.postings_lists;
let mut stop_words_builder = SetBuilder::memory();
for word in addition {
stop_words_builder.insert(&word)?;
// we remove every posting list associated to a new stop word
postings_lists_store.del_postings_list(writer, word.as_bytes())?;
}
// create the new delta stop words fst
let delta_stop_words = stop_words_builder
.into_inner()
.and_then(fst::Set::from_bytes)?;
// we also need to remove all the stop words from the main fst
if let Some(word_fst) = main_store.words_fst(writer)? {
let op = OpBuilder::new()
.add(&word_fst)
.add(&delta_stop_words)
.difference();
let mut word_fst_builder = SetBuilder::memory();
word_fst_builder.extend_stream(op)?;
let word_fst = word_fst_builder
.into_inner()
.and_then(fst::Set::from_bytes)?;
main_store.put_words_fst(writer, &word_fst)?;
}
// now we add all of these stop words to the stop words fst in the main store
let stop_words_fst = main_store.stop_words_fst(writer)?.unwrap_or_default();
let op = OpBuilder::new()
.add(&stop_words_fst)
.add(&delta_stop_words)
.r#union();
let mut stop_words_builder = SetBuilder::memory();
stop_words_builder.extend_stream(op)?;
let stop_words_fst = stop_words_builder
.into_inner()
.and_then(fst::Set::from_bytes)?;
main_store.put_stop_words_fst(writer, &stop_words_fst)?;
Ok(())
}
fn apply_stop_words_deletion(
writer: &mut heed::RwTxn<MainT>,
index: &store::Index,
deletion: BTreeSet<String>,
) -> MResult<()> {
let mut stop_words_builder = SetBuilder::memory();
for word in deletion {
stop_words_builder.insert(&word)?;
}
// create the new delta stop words fst
let delta_stop_words = stop_words_builder
.into_inner()
.and_then(fst::Set::from_bytes)?;
// now we delete all of these stop words from the main store
let stop_words_fst = index.main.stop_words_fst(writer)?.unwrap_or_default();
let op = OpBuilder::new()
.add(&stop_words_fst)
.add(&delta_stop_words)
.difference();
let mut stop_words_builder = SetBuilder::memory();
stop_words_builder.extend_stream(op)?;
let stop_words_fst = stop_words_builder.into_inner().and_then(fst::Set::from_bytes)?;
Ok(index.main.put_stop_words_fst(writer, &stop_words_fst)?)
}
pub fn apply_synonyms_update(
writer: &mut heed::RwTxn<MainT>,
index: &store::Index,
synonyms: BTreeMap<String, Vec<String>>,
) -> MResult<()> {
let main_store = index.main;
let synonyms_store = index.synonyms;
let mut synonyms_builder = SetBuilder::memory();
synonyms_store.clear(writer)?;
for (word, alternatives) in synonyms.clone() {
synonyms_builder.insert(&word)?;
let alternatives = {
let alternatives = SetBuf::from_dirty(alternatives);
let mut alternatives_builder = SetBuilder::memory();
alternatives_builder.extend_iter(alternatives)?;
let bytes = alternatives_builder.into_inner()?;
fst::Set::from_bytes(bytes)?
};
synonyms_store.put_synonyms(writer, word.as_bytes(), &alternatives)?;
}
let synonyms_set = synonyms_builder
.into_inner()
.and_then(fst::Set::from_bytes)?;
main_store.put_synonyms_fst(writer, &synonyms_set)?;
Ok(())
}
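A usage sketch, assuming an open `MainT` write transaction; the "film" entry is a hypothetical example:
fn set_movie_synonyms(
writer: &mut heed::RwTxn<MainT>,
index: &store::Index,
) -> MResult<()> {
let mut synonyms = BTreeMap::new();
synonyms.insert(
"film".to_string(),
vec!["movie".to_string(), "picture".to_string()],
);
// each alternatives list is deduplicated and stored as its own fst set
apply_synonyms_update(writer, index, synonyms)
}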


@ -0,0 +1,61 @@
[package]
name = "meilisearch-http"
description = "MeiliSearch HTTP server"
version = "0.10.1"
license = "MIT"
authors = [
"Quentin de Quelen <quentin@dequelen.me>",
"Clément Renault <clement@meilisearch.com>",
]
edition = "2018"
[[bin]]
name = "meilisearch"
path = "src/main.rs"
[dependencies]
async-std = { version = "1.5.0", features = ["attributes"] }
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.4.2"
env_logger = "0.7.1"
futures = "0.3.4"
heed = "0.7.0"
http = "0.1.19"
indexmap = { version = "1.3.2", features = ["serde-1"] }
log = "0.4.8"
main_error = "0.1.0"
meilisearch-core = { path = "../meilisearch-core", version = "0.10.1" }
meilisearch-schema = { path = "../meilisearch-schema", version = "0.10.1" }
meilisearch-tokenizer = { path = "../meilisearch-tokenizer", version = "0.10.1" }
mime = "0.3.16"
pretty-bytes = "0.2.2"
rand = "0.7.3"
rayon = "1.3.0"
serde = { version = "1.0.105", features = ["derive"] }
serde_json = { version = "1.0.50", features = ["preserve_order"] }
serde_qs = "0.5.2"
sha2 = "0.8.1"
siphasher = "0.3.2"
structopt = "0.3.12"
sysinfo = "0.12.0"
tide = "0.6.0"
ureq = { version = "0.12.0", features = ["tls"], default-features = false }
walkdir = "2.3.1"
whoami = "0.8.1"
slice-group-by = "0.2.6"
[dev-dependencies]
http-service = "0.4.0"
http-service-mock = "0.4.0"
tempdir = "0.3.7"
once_cell = "1.3.1"
[dev-dependencies.assert-json-diff]
git = "https://github.com/qdequele/assert-json-diff"
branch = "master"
[build-dependencies]
vergen = "3.1.0"
[target.'cfg(unix)'.dependencies]
jemallocator = "0.3.2"

meilisearch-http/build.rs Normal file

@ -0,0 +1,10 @@
use vergen::{generate_cargo_keys, ConstantsFlags};
fn main() {
// Set up the flags, toggling off the 'SEMVER_FROM_CARGO_PKG' flag
let mut flags = ConstantsFlags::all();
flags.toggle(ConstantsFlags::SEMVER_FROM_CARGO_PKG);
// Generate the 'cargo:' key output from the toggled flags
generate_cargo_keys(flags).expect("Unable to generate the cargo keys!");
}

meilisearch-http/public/bulma.min.css vendored Normal file

File diff suppressed because one or more lines are too long


@ -0,0 +1,270 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="/bulma.min.css">
<title>MeiliSearch</title>
<style>
em {
color: hsl(204, 86%, 25%);
font-style: inherit;
background-color: hsl(204, 86%, 88%);
}
#results {
max-width: 900px;
margin: 20px auto 0 auto;
padding: 0;
}
.notification {
display: flex;
justify-content: center;
}
.level-left {
margin-right: 50px;
}
.document {
padding: 20px 20px;
background-color: #f5f5f5;
border-radius: 4px;
margin-bottom: 20px;
display: flex;
}
.document ol {
flex: 0 0 75%;
max-width: 75%;
padding: 0;
margin: 0;
}
.document .image {
max-width: 25%;
flex: 0 0 25%;
padding-left: 30px;
box-sizing: border-box;
}
.document .image img {
width: 100%;
}
.field {
list-style-type: none;
display: flex;
flex-wrap: wrap;
}
.field:not(:last-child) {
margin-bottom: 7px;
}
.attribute {
flex: 0 0 25%;
max-width: 25%;
text-align: right;
padding-right: 10px;
box-sizing: border-box;
text-transform: uppercase;
color: rgba(0,0,0,.7);
}
.content {
max-width: 75%;
flex: 0 0 75%;
box-sizing: border-box;
padding-left: 10px;
color: rgba(0,0,0,.9);
overflow-wrap: break-word;
}
</style>
</head>
<body>
<section class="hero is-light">
<div class="hero-body">
<div class="container">
<h1 class="title">
Welcome to MeiliSearch
</h1>
<h2 class="subtitle">
This dashboard will help you check the search results with ease.
</h2>
</div>
</div>
</section>
<section class="hero container">
<div class="notification" style="border-radius: 0 0 4px 4px;">
<nav class="level">
<!-- Left side -->
<div class="level-left">
<div class="level-item">
<div class="field has-addons has-addons-right">
<p class="control">
<span class="select">
<select id="index">
<!-- index names -->
</select>
</span>
</p>
<p class="control">
<input id="search" class="input" type="text" autofocus placeholder="e.g. George Clooney">
</p>
</div>
</div>
</div>
<!-- Right side -->
<nav class="level-right">
<div class="level-item has-text-centered">
<div>
<p class="heading">Documents</p>
<p id="count" class="title">25</p>
</div>
</div>
<div class="level-item has-text-centered">
<div>
<p class="heading">Time Spent</p>
<p id="time" class="title">4ms</p>
</div>
</div>
</nav>
</nav>
</div>
</section>
<section>
<ol id="results" class="content">
<!-- documents matching requests -->
</ol>
</section>
</body>
<script>
function sanitizeHTMLEntities(str) {
if (str && typeof str === 'string') {
str = str.replace(/</g,"&lt;");
str = str.replace(/>/g,"&gt;");
str = str.replace(/&lt;em&gt;/g,"<em>");
str = str.replace(/&lt;\/em&gt;/g,"<\/em>");
}
return str;
}
function httpGet(theUrl) {
var xmlHttp = new XMLHttpRequest();
xmlHttp.open("GET", theUrl, false); // false for synchronous request
xmlHttp.send(null);
return xmlHttp.responseText;
}
let lastRequest = undefined;
function triggerSearch() {
var e = document.getElementById("index");
if (e.selectedIndex == -1) { return }
var index = e.options[e.selectedIndex].value;
let theUrl = `${baseUrl}/indexes/${index}/search?q=${search.value}&attributesToHighlight=*`;
if (lastRequest) { lastRequest.abort() }
lastRequest = new XMLHttpRequest();
lastRequest.open("GET", theUrl, true);
lastRequest.onload = function (e) {
if (lastRequest.readyState === 4 && lastRequest.status === 200) {
let sanitizedResponseText = sanitizeHTMLEntities(lastRequest.responseText);
let httpResults = JSON.parse(sanitizedResponseText);
results.innerHTML = '';
let processingTimeMs = httpResults.processingTimeMs;
let numberOfDocuments = httpResults.hits.length;
time.innerHTML = `${processingTimeMs}ms`;
count.innerHTML = `${numberOfDocuments}`;
for (result of httpResults.hits) {
const element = {...result, ...result._formatted };
delete element._formatted;
const elem = document.createElement('li');
elem.classList.add("document");
const ol = document.createElement('ol');
let image = undefined;
for (const prop in element) {
// Check if property is an image url link.
if (typeof result[prop] === 'string') {
if (image == undefined && result[prop].match(/^(https|http):\/\/.*(jpe?g|png|gif)(\?.*)?$/g)) {
image = result[prop];
}
}
const field = document.createElement('li');
field.classList.add("field");
const attribute = document.createElement('div');
attribute.classList.add("attribute");
attribute.innerHTML = prop;
const content = document.createElement('div');
content.classList.add("content");
if (typeof (element[prop]) === "object") {
content.innerHTML = JSON.stringify(element[prop]);
} else {
content.innerHTML = element[prop];
}
field.appendChild(attribute);
field.appendChild(content);
ol.appendChild(field);
}
elem.appendChild(ol);
if (image != undefined) {
const div = document.createElement('div');
div.classList.add("image");
const img = document.createElement('img');
img.src = image;
div.appendChild(img);
elem.appendChild(div);
}
results.appendChild(elem)
}
} else {
console.error(lastRequest.statusText);
}
};
lastRequest.send(null);
}
let baseUrl = window.location.origin;
// TODO we must not block here
let result = JSON.parse(httpGet(`${baseUrl}/indexes`));
let select = document.getElementById("index");
for (const index of result) {
const option = document.createElement('option');
option.value = index.uid;
option.innerHTML = index.name;
select.appendChild(option);
}
search.oninput = triggerSearch;
select.onchange = triggerSearch;
triggerSearch();
</script>
</body>
</html>

View File

@ -0,0 +1,69 @@
use std::hash::{Hash, Hasher};
use std::thread;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use log::error;
use serde::Serialize;
use serde_qs as qs;
use siphasher::sip::SipHasher;
const AMPLITUDE_API_KEY: &str = "f7fba398780e06d8fe6666a9be7e3d47";
#[derive(Debug, Serialize)]
struct Event<'a> {
user_id: &'a str,
event_type: &'a str,
device_id: &'a str,
time: u64,
}
#[derive(Debug, Serialize)]
struct AmplitudeRequest<'a> {
api_key: &'a str,
event: &'a str,
}
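/// Sends an anonymized `runtime_tick` event to Amplitude once a day.
/// The user id is a SipHash of username + hostname + platform, so no
/// personally identifiable information leaves the machine.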
pub fn analytics_sender() {
let username = whoami::username();
let hostname = whoami::hostname();
let platform = whoami::platform();
let uid = username + &hostname + &platform.to_string();
let mut hasher = SipHasher::new();
uid.hash(&mut hasher);
let hash = hasher.finish();
let uid = format!("{:X}", hash);
let platform = platform.to_string();
loop {
let n = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let user_id = &uid;
let device_id = &platform;
let time = n.as_secs();
let event_type = "runtime_tick";
let event = Event {
user_id,
event_type,
device_id,
time,
};
let event = serde_json::to_string(&event).unwrap();
let request = AmplitudeRequest {
api_key: AMPLITUDE_API_KEY,
event: &event,
};
let body = qs::to_string(&request).unwrap();
let response = ureq::post("https://api.amplitude.com/httpapi").send_string(&body);
if !response.ok() {
let body = response.into_string().unwrap();
error!("Unsuccessful call to Amplitude: {}", body);
}
thread::sleep(Duration::from_secs(86_400)) // one day
}
}

View File

@ -0,0 +1,163 @@
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::Arc;
use chrono::{DateTime, Utc};
use heed::types::{SerdeBincode, Str};
use log::error;
use meilisearch_core::{Database, Error as MError, MResult, MainT, UpdateT};
use sha2::Digest;
use sysinfo::Pid;
use crate::option::Opt;
use crate::routes::index::index_update_callback;
const LAST_UPDATE_KEY: &str = "last-update";
type SerdeDatetime = SerdeBincode<DateTime<Utc>>;
#[derive(Clone)]
pub struct Data {
inner: Arc<DataInner>,
}
impl Deref for Data {
type Target = DataInner;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
#[derive(Clone)]
pub struct DataInner {
pub db: Arc<Database>,
pub db_path: String,
pub api_keys: ApiKeys,
pub server_pid: Pid,
}
#[derive(Default, Clone)]
pub struct ApiKeys {
pub public: Option<String>,
pub private: Option<String>,
pub master: Option<String>,
}
impl ApiKeys {
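/// Derives the missing private and public keys from the master key,
/// as the SHA-256 of "{master}-private" and "{master}-public" respectively.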
pub fn generate_missing_api_keys(&mut self) {
if let Some(master_key) = &self.master {
if self.private.is_none() {
let key = format!("{}-private", master_key);
let sha = sha2::Sha256::digest(key.as_bytes());
self.private = Some(format!("{:x}", sha));
}
if self.public.is_none() {
let key = format!("{}-public", master_key);
let sha = sha2::Sha256::digest(key.as_bytes());
self.public = Some(format!("{:x}", sha));
}
}
}
}
impl DataInner {
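/// Returns `Some(true)` when the index has an update currently running,
/// `Some(false)` when it is idle, and `None` when the index does not exist.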
pub fn is_indexing(&self, reader: &heed::RoTxn<UpdateT>, index: &str) -> MResult<Option<bool>> {
match self.db.open_index(&index) {
Some(index) => index.current_update_id(&reader).map(|u| Some(u.is_some())),
None => Ok(None),
}
}
pub fn last_update(&self, reader: &heed::RoTxn<MainT>) -> MResult<Option<DateTime<Utc>>> {
match self
.db
.common_store()
.get::<_, Str, SerdeDatetime>(reader, LAST_UPDATE_KEY)?
{
Some(datetime) => Ok(Some(datetime)),
None => Ok(None),
}
}
pub fn set_last_update(&self, writer: &mut heed::RwTxn<MainT>) -> MResult<()> {
self.db
.common_store()
.put::<_, Str, SerdeDatetime>(writer, LAST_UPDATE_KEY, &Utc::now())
.map_err(Into::into)
}
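/// Recomputes how many documents contain each field of the given index
/// and stores the resulting per-field frequencies in the main store.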
pub fn compute_stats(&self, writer: &mut heed::RwTxn<MainT>, index_uid: &str) -> MResult<()> {
let index = match self.db.open_index(&index_uid) {
Some(index) => index,
None => {
error!("Impossible to retrieve index {}", index_uid);
return Ok(());
}
};
let schema = match index.main.schema(&writer)? {
Some(schema) => schema,
None => return Ok(()),
};
let all_documents_fields = index
.documents_fields_counts
.all_documents_fields_counts(&writer)?;
// count fields frequencies
let mut fields_frequency = HashMap::<_, usize>::new();
for result in all_documents_fields {
let (_, attr, _) = result?;
if let Some(field_id) = schema.indexed_pos_to_field_id(attr) {
*fields_frequency.entry(field_id).or_default() += 1;
}
}
// convert attributes to their names
let frequency: HashMap<_, _> = fields_frequency
.into_iter()
.filter_map(|(a, c)| schema.name(a).map(|name| (name.to_string(), c)))
.collect();
index
.main
.put_fields_frequency(writer, &frequency)
.map_err(MError::Zlmdb)
}
}
impl Data {
pub fn new(opt: Opt) -> Data {
let db_path = opt.db_path.clone();
let server_pid = sysinfo::get_current_pid().unwrap();
let db = Arc::new(Database::open_or_create(opt.db_path).unwrap());
let mut api_keys = ApiKeys {
master: opt.master_key.clone(),
private: None,
public: None,
};
api_keys.generate_missing_api_keys();
let inner_data = DataInner {
db: db.clone(),
db_path,
api_keys,
server_pid,
};
let data = Data {
inner: Arc::new(inner_data),
};
let callback_context = data.clone();
db.set_update_callback(Box::new(move |index_uid, status| {
index_update_callback(&index_uid, &callback_context, status);
}));
data
}
}

View File

@ -0,0 +1,191 @@
use std::fmt::Display;
use http::status::StatusCode;
use log::{error, warn};
use meilisearch_core::{FstError, HeedError};
use serde::{Deserialize, Serialize};
use tide::IntoResponse;
use tide::Response;
use crate::helpers::meilisearch::Error as SearchError;
pub type SResult<T> = Result<T, ResponseError>;
pub enum ResponseError {
Internal(String),
BadRequest(String),
InvalidToken(String),
NotFound(String),
IndexNotFound(String),
DocumentNotFound(String),
MissingHeader(String),
FilterParsing(String),
BadParameter(String, String),
OpenIndex(String),
CreateIndex(String),
InvalidIndexUid,
Maintenance,
}
impl ResponseError {
pub fn internal(message: impl Display) -> ResponseError {
ResponseError::Internal(message.to_string())
}
pub fn bad_request(message: impl Display) -> ResponseError {
ResponseError::BadRequest(message.to_string())
}
pub fn invalid_token(message: impl Display) -> ResponseError {
ResponseError::InvalidToken(message.to_string())
}
pub fn not_found(message: impl Display) -> ResponseError {
ResponseError::NotFound(message.to_string())
}
pub fn index_not_found(message: impl Display) -> ResponseError {
ResponseError::IndexNotFound(message.to_string())
}
pub fn document_not_found(message: impl Display) -> ResponseError {
ResponseError::DocumentNotFound(message.to_string())
}
pub fn missing_header(message: impl Display) -> ResponseError {
ResponseError::MissingHeader(message.to_string())
}
pub fn bad_parameter(name: impl Display, message: impl Display) -> ResponseError {
ResponseError::BadParameter(name.to_string(), message.to_string())
}
pub fn open_index(message: impl Display) -> ResponseError {
ResponseError::OpenIndex(message.to_string())
}
pub fn create_index(message: impl Display) -> ResponseError {
ResponseError::CreateIndex(message.to_string())
}
}
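// Maps every error variant to an HTTP status code and a JSON body
// built by the `error` helper below.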
impl IntoResponse for ResponseError {
fn into_response(self) -> Response {
match self {
ResponseError::Internal(err) => {
error!("internal server error: {}", err);
error("Internal server error".to_string(),
StatusCode::INTERNAL_SERVER_ERROR,
)
}
ResponseError::FilterParsing(err) => {
warn!("error paring filter: {}", err);
error(format!("parsing error: {}", err),
StatusCode::BAD_REQUEST)
}
ResponseError::BadRequest(err) => {
warn!("bad request: {}", err);
error(err, StatusCode::BAD_REQUEST)
}
ResponseError::InvalidToken(err) => {
error(format!("Invalid API key: {}", err), StatusCode::FORBIDDEN)
}
ResponseError::NotFound(err) => error(err, StatusCode::NOT_FOUND),
ResponseError::IndexNotFound(index) => {
error(format!("Index {} not found", index), StatusCode::NOT_FOUND)
}
ResponseError::DocumentNotFound(id) => error(
format!("Document with id {} not found", id),
StatusCode::NOT_FOUND,
),
ResponseError::MissingHeader(header) => error(
format!("Header {} is missing", header),
StatusCode::UNAUTHORIZED,
),
ResponseError::BadParameter(param, e) => error(
format!("Url parameter {} error: {}", param, e),
StatusCode::BAD_REQUEST,
),
ResponseError::CreateIndex(err) => error(
format!("Impossible to create index; {}", err),
StatusCode::BAD_REQUEST,
),
ResponseError::OpenIndex(err) => error(
format!("Impossible to open index; {}", err),
StatusCode::BAD_REQUEST,
),
ResponseError::InvalidIndexUid => error(
"Index must have a valid uid; Index uid can be of type integer or string only composed of alphanumeric characters, hyphens (-) and underscores (_).".to_string(),
StatusCode::BAD_REQUEST,
),
ResponseError::Maintenance => error(
String::from("Server is in maintenance, please try again later"),
StatusCode::SERVICE_UNAVAILABLE,
),
}
}
}
#[derive(Serialize, Deserialize)]
struct ErrorMessage {
message: String,
}
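/// Builds a response with the given status code and a JSON body of the
/// shape {"message": "..."}.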
fn error(message: String, status: StatusCode) -> Response {
let message = ErrorMessage { message };
tide::Response::new(status.as_u16())
.body_json(&message)
.unwrap()
}
impl From<serde_json::Error> for ResponseError {
fn from(err: serde_json::Error) -> ResponseError {
ResponseError::internal(err)
}
}
impl From<meilisearch_core::Error> for ResponseError {
fn from(err: meilisearch_core::Error) -> ResponseError {
ResponseError::internal(err)
}
}
impl From<HeedError> for ResponseError {
fn from(err: HeedError) -> ResponseError {
ResponseError::internal(err)
}
}
impl From<FstError> for ResponseError {
fn from(err: FstError) -> ResponseError {
ResponseError::internal(err)
}
}
impl From<SearchError> for ResponseError {
fn from(err: SearchError) -> ResponseError {
match err {
SearchError::FilterParsing(s) => ResponseError::FilterParsing(s),
_ => ResponseError::internal(err),
}
}
}
impl From<meilisearch_core::settings::RankingRuleConversionError> for ResponseError {
fn from(err: meilisearch_core::settings::RankingRuleConversionError) -> ResponseError {
ResponseError::internal(err)
}
}
pub trait IntoInternalError<T> {
fn into_internal_error(self) -> SResult<T>;
}
impl<T> IntoInternalError<T> for Option<T> {
fn into_internal_error(self) -> SResult<T> {
match self {
Some(value) => Ok(value),
None => Err(ResponseError::internal("Heed cannot find requested value")),
}
}
}

View File

@ -0,0 +1,697 @@
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::convert::From;
use std::error;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::time::{Duration, Instant};
use indexmap::IndexMap;
use log::error;
use meilisearch_core::Filter;
use meilisearch_core::criterion::*;
use meilisearch_core::settings::RankingRule;
use meilisearch_core::{Highlight, Index, MainT, RankedMap};
use meilisearch_schema::{FieldId, Schema};
use meilisearch_tokenizer::is_cjk;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use siphasher::sip::SipHasher;
use slice_group_by::GroupBy;
#[derive(Debug)]
pub enum Error {
SearchDocuments(String),
RetrieveDocument(u64, String),
DocumentNotFound(u64),
CropFieldWrongType(String),
FilterParsing(String),
AttributeNotFoundOnDocument(String),
AttributeNotFoundOnSchema(String),
MissingFilterValue,
UnknownFilteredAttribute,
Internal(String),
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use Error::*;
match self {
SearchDocuments(err) => write!(f, "impossible to search documents; {}", err),
RetrieveDocument(id, err) => write!(
f,
"impossible to retrieve the document with id: {}; {}",
id, err
),
DocumentNotFound(id) => write!(f, "document {} not found", id),
CropFieldWrongType(field) => {
write!(f, "the field {} cannot be cropped it's not a string", field)
}
AttributeNotFoundOnDocument(field) => {
write!(f, "field {} is not found on document", field)
}
AttributeNotFoundOnSchema(field) => write!(f, "field {} is not found on schema", field),
MissingFilterValue => f.write_str("a filter doesn't have a value to compare it with"),
UnknownFilteredAttribute => {
f.write_str("a filter is specifying an unknown schema attribute")
}
Internal(err) => write!(f, "internal error; {}", err),
FilterParsing(err) => write!(f, "filter parsing error: {}", err),
}
}
}
impl From<meilisearch_core::Error> for Error {
fn from(error: meilisearch_core::Error) -> Self {
use meilisearch_core::pest_error::LineColLocation::*;
match error {
meilisearch_core::Error::FilterParseError(e) => {
let (line, column) = match e.line_col {
Span((line, _), (column, _)) => (line, column),
Pos((line, column)) => (line, column),
};
let message = format!("parsing error on line {} at column {}: {}", line, column, e.variant.message());
Error::FilterParsing(message)
},
_ => Error::Internal(error.to_string()),
}
}
}
impl From<heed::Error> for Error {
fn from(error: heed::Error) -> Self {
Error::Internal(error.to_string())
}
}
pub trait IndexSearchExt {
fn new_search(&self, query: String) -> SearchBuilder;
}
impl IndexSearchExt for Index {
fn new_search(&self, query: String) -> SearchBuilder {
SearchBuilder {
index: self,
query,
offset: 0,
limit: 20,
attributes_to_crop: None,
attributes_to_retrieve: None,
attributes_to_highlight: None,
filters: None,
timeout: Duration::from_millis(30),
matches: false,
}
}
}
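/// Fluent builder over a search on an `Index`: configure it with the
/// setters below, then run it with `search` and a read transaction.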
pub struct SearchBuilder<'a> {
index: &'a Index,
query: String,
offset: usize,
limit: usize,
attributes_to_crop: Option<HashMap<String, usize>>,
attributes_to_retrieve: Option<HashSet<String>>,
attributes_to_highlight: Option<HashSet<String>>,
filters: Option<String>,
timeout: Duration,
matches: bool,
}
impl<'a> SearchBuilder<'a> {
pub fn offset(&mut self, value: usize) -> &SearchBuilder {
self.offset = value;
self
}
pub fn limit(&mut self, value: usize) -> &SearchBuilder {
self.limit = value;
self
}
pub fn attributes_to_crop(&mut self, value: HashMap<String, usize>) -> &SearchBuilder {
self.attributes_to_crop = Some(value);
self
}
pub fn attributes_to_retrieve(&mut self, value: HashSet<String>) -> &SearchBuilder {
self.attributes_to_retrieve = Some(value);
self
}
pub fn add_retrievable_field(&mut self, value: String) -> &SearchBuilder {
let attributes_to_retrieve = self.attributes_to_retrieve.get_or_insert(HashSet::new());
attributes_to_retrieve.insert(value);
self
}
pub fn attributes_to_highlight(&mut self, value: HashSet<String>) -> &SearchBuilder {
self.attributes_to_highlight = Some(value);
self
}
pub fn filters(&mut self, value: String) -> &SearchBuilder {
self.filters = Some(value);
self
}
pub fn timeout(&mut self, value: Duration) -> &SearchBuilder {
self.timeout = value;
self
}
pub fn get_matches(&mut self) -> &SearchBuilder {
self.matches = true;
self
}
pub fn search(&self, reader: &heed::RoTxn<MainT>) -> Result<SearchResult, Error> {
let schema = self.index.main.schema(reader);
let schema = schema.map_err(|e| Error::Internal(e.to_string()))?;
let schema = match schema {
Some(schema) => schema,
None => return Err(Error::Internal(String::from("missing schema"))),
};
let ranked_map = self.index.main.ranked_map(reader);
let ranked_map = ranked_map.map_err(|e| Error::Internal(e.to_string()))?;
let ranked_map = ranked_map.unwrap_or_default();
// Change criteria
let mut query_builder = match self.get_criteria(reader, &ranked_map, &schema)? {
Some(criteria) => self.index.query_builder_with_criteria(criteria),
None => self.index.query_builder(),
};
if let Some(filter_expression) = &self.filters {
let filter = Filter::parse(filter_expression, &schema)?;
query_builder.with_filter(move |id| {
let index = &self.index;
let reader = &reader;
let filter = &filter;
match filter.test(reader, index, id) {
Ok(res) => res,
Err(e) => {
log::warn!("unexpected error during filtering: {}", e);
false
}
}
});
}
query_builder.with_fetch_timeout(self.timeout);
if let Some(field) = self.index.main.distinct_attribute(reader)? {
if let Some(field_id) = schema.id(&field) {
query_builder.with_distinct(1, move |id| {
match self.index.document_attribute_bytes(reader, id, field_id) {
Ok(Some(bytes)) => {
let mut s = SipHasher::new();
bytes.hash(&mut s);
Some(s.finish())
}
_ => None,
}
});
}
}
let start = Instant::now();
let result =
query_builder.query(reader, &self.query, self.offset..(self.offset + self.limit));
let (docs, nb_hits) = result.map_err(|e| Error::SearchDocuments(e.to_string()))?;
let time_ms = start.elapsed().as_millis() as usize;
let mut all_attributes: HashSet<&str> = HashSet::new();
let mut all_formatted: HashSet<&str> = HashSet::new();
match &self.attributes_to_retrieve {
Some(to_retrieve) => {
all_attributes.extend(to_retrieve.iter().map(String::as_str));
if let Some(to_highlight) = &self.attributes_to_highlight {
all_formatted.extend(to_highlight.iter().map(String::as_str));
}
if let Some(to_crop) = &self.attributes_to_crop {
all_formatted.extend(to_crop.keys().map(String::as_str));
}
all_attributes.extend(&all_formatted);
},
None => {
all_attributes.extend(schema.displayed_name());
// If we specified at least one attribute to highlight or crop then
// all available attributes will be returned in the _formatted field.
if self.attributes_to_highlight.is_some() || self.attributes_to_crop.is_some() {
all_formatted.extend(all_attributes.iter().cloned());
}
},
}
let mut hits = Vec::with_capacity(self.limit);
for doc in docs {
let mut document: IndexMap<String, Value> = self
.index
.document(reader, Some(&all_attributes), doc.id)
.map_err(|e| Error::RetrieveDocument(doc.id.0, e.to_string()))?
.ok_or(Error::DocumentNotFound(doc.id.0))?;
let mut formatted = document.iter()
.filter(|(key, _)| all_formatted.contains(key.as_str()))
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
let mut matches = doc.highlights.clone();
// Crops fields if needed
if let Some(fields) = &self.attributes_to_crop {
crop_document(&mut formatted, &mut matches, &schema, fields);
}
// Transform to readable matches
if let Some(attributes_to_highlight) = &self.attributes_to_highlight {
let matches = calculate_matches(
matches.clone(),
self.attributes_to_highlight.clone(),
&schema,
);
formatted = calculate_highlights(&formatted, &matches, attributes_to_highlight);
}
let matches_info = if self.matches {
Some(calculate_matches(matches, self.attributes_to_retrieve.clone(), &schema))
} else {
None
};
if let Some(attributes_to_retrieve) = &self.attributes_to_retrieve {
document.retain(|key, _| attributes_to_retrieve.contains(key.as_str()))
}
let hit = SearchHit {
document,
formatted,
matches_info,
};
hits.push(hit);
}
let results = SearchResult {
hits,
offset: self.offset,
limit: self.limit,
nb_hits,
exhaustive_nb_hits: false,
processing_time_ms: time_ms,
query: self.query.to_string(),
};
Ok(results)
}
pub fn get_criteria(
&self,
reader: &heed::RoTxn<MainT>,
ranked_map: &'a RankedMap,
schema: &Schema,
) -> Result<Option<Criteria<'a>>, Error> {
let ranking_rules = self.index.main.ranking_rules(reader)?;
if let Some(ranking_rules) = ranking_rules {
let mut builder = CriteriaBuilder::with_capacity(7 + ranking_rules.len());
for rule in ranking_rules {
match rule {
RankingRule::Typo => builder.push(Typo),
RankingRule::Words => builder.push(Words),
RankingRule::Proximity => builder.push(Proximity),
RankingRule::Attribute => builder.push(Attribute),
RankingRule::WordsPosition => builder.push(WordsPosition),
RankingRule::Exactness => builder.push(Exactness),
RankingRule::Asc(field) => {
match SortByAttr::lower_is_better(&ranked_map, &schema, &field) {
Ok(rule) => builder.push(rule),
Err(err) => error!("Error during criteria builder; {:?}", err),
}
}
RankingRule::Desc(field) => {
match SortByAttr::higher_is_better(&ranked_map, &schema, &field) {
Ok(rule) => builder.push(rule),
Err(err) => error!("Error during criteria builder; {:?}", err),
}
}
}
}
builder.push(DocumentId);
return Ok(Some(builder.build()));
}
Ok(None)
}
}
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Serialize, Deserialize)]
pub struct MatchPosition {
pub start: usize,
pub length: usize,
}
impl Ord for MatchPosition {
fn cmp(&self, other: &Self) -> Ordering {
// order by start position, breaking ties with the match length
self.start
.cmp(&other.start)
.then_with(|| self.length.cmp(&other.length))
}
}
pub type HighlightInfos = HashMap<String, Value>;
pub type MatchesInfos = HashMap<String, Vec<MatchPosition>>;
// pub type RankingInfos = HashMap<String, u64>;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SearchHit {
#[serde(flatten)]
pub document: IndexMap<String, Value>,
#[serde(rename = "_formatted", skip_serializing_if = "IndexMap::is_empty")]
pub formatted: IndexMap<String, Value>,
#[serde(rename = "_matchesInfo", skip_serializing_if = "Option::is_none")]
pub matches_info: Option<MatchesInfos>,
}
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SearchResult {
pub hits: Vec<SearchHit>,
pub offset: usize,
pub limit: usize,
pub nb_hits: usize,
pub exhaustive_nb_hits: bool,
pub processing_time_ms: usize,
pub query: String,
}
/// Returns the start index and the length of the crop, both aligned to word boundaries.
fn aligned_crop(text: &str, match_index: usize, context: usize) -> (usize, usize) {
let is_word_component = |c: &char| c.is_alphanumeric() && !is_cjk(*c);
let word_end_index = |mut index| {
if text.chars().nth(index - 1).map_or(false, |c| is_word_component(&c)) {
index += text.chars().skip(index).take_while(is_word_component).count();
}
index
};
if context == 0 {
// the count needs to be at least 1 for CJK queries to return something
return (match_index, 1 + text.chars().skip(match_index).take_while(is_word_component).count());
}
let start = match match_index.saturating_sub(context) {
0 => 0,
n => word_end_index(n),
};
let end = word_end_index(start + 2 * context);
(start, end - start)
}
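/// Crops `text` around its first match, keeping `context` characters of
/// context on each side, and rebases the remaining match offsets onto the
/// cropped string.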
fn crop_text(
text: &str,
matches: impl IntoIterator<Item = Highlight>,
context: usize,
) -> (String, Vec<Highlight>) {
let mut matches = matches.into_iter().peekable();
let char_index = matches.peek().map(|m| m.char_index as usize).unwrap_or(0);
let (start, count) = aligned_crop(text, char_index, context);
//TODO do something about the double allocation
let text = text.chars().skip(start).take(count).collect::<String>().trim().to_string();
// update matches index to match the new cropped text
let matches = matches
.take_while(|m| (m.char_index as usize) + (m.char_length as usize) <= start + (context * 2))
.map(|match_| Highlight {
char_index: match_.char_index - start as u16,
..match_
})
.collect();
(text, matches)
}
fn crop_document(
document: &mut IndexMap<String, Value>,
matches: &mut Vec<Highlight>,
schema: &Schema,
fields: &HashMap<String, usize>,
) {
matches.sort_unstable_by_key(|m| (m.char_index, m.char_length));
for (field, length) in fields {
let attribute = match schema.id(field) {
Some(attribute) => attribute,
None => continue,
};
let selected_matches = matches
.iter()
.filter(|m| FieldId::new(m.attribute) == attribute)
.cloned();
if let Some(Value::String(ref mut original_text)) = document.get_mut(field) {
let (cropped_text, cropped_matches) =
crop_text(original_text, selected_matches, *length);
*original_text = cropped_text;
matches.retain(|m| FieldId::new(m.attribute) != attribute);
matches.extend_from_slice(&cropped_matches);
}
}
}
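/// Groups the raw highlight positions by attribute name, keeping only
/// displayed attributes (and only the requested ones when a retrieve set
/// is given); positions are sorted and deduplicated per attribute.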
fn calculate_matches(
matches: Vec<Highlight>,
attributes_to_retrieve: Option<HashSet<String>>,
schema: &Schema,
) -> MatchesInfos {
let mut matches_result: HashMap<String, Vec<MatchPosition>> = HashMap::new();
for m in matches.iter() {
if let Some(attribute) = schema.name(FieldId::new(m.attribute)) {
if let Some(attributes_to_retrieve) = &attributes_to_retrieve {
if !attributes_to_retrieve.contains(attribute) {
continue;
}
}
if !schema.displayed_name().contains(attribute) {
continue;
}
matches_result
.entry(attribute.to_string())
.or_default()
.push(MatchPosition {
start: m.char_index as usize,
length: m.char_length as usize,
});
}
}
for (_, val) in matches_result.iter_mut() {
val.sort_unstable();
val.dedup();
}
matches_result
}
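/// Wraps the matches of every highlighted attribute in <em> tags. When
/// several matches start at the same offset only the longest one is kept:
/// positions are sorted, so the last entry of each `linear_group_by_key`
/// group is the longest.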
fn calculate_highlights(
document: &IndexMap<String, Value>,
matches: &MatchesInfos,
attributes_to_highlight: &HashSet<String>,
) -> IndexMap<String, Value> {
let mut highlight_result = document.clone();
for (attribute, matches) in matches.iter() {
if attributes_to_highlight.contains(attribute) {
if let Some(Value::String(value)) = document.get(attribute) {
let value: Vec<_> = value.chars().collect();
let mut highlighted_value = String::new();
let mut index = 0;
let longest_matches = matches
.linear_group_by_key(|m| m.start)
.map(|group| group.last().unwrap());
for m in longest_matches {
if m.start >= index {
let before = value.get(index..m.start);
let highlighted = value.get(m.start..(m.start + m.length));
if let (Some(before), Some(highlighted)) = (before, highlighted) {
highlighted_value.extend(before);
highlighted_value.push_str("<em>");
highlighted_value.extend(highlighted);
highlighted_value.push_str("</em>");
index = m.start + m.length;
} else {
error!("value: {:?}; index: {:?}, match: {:?}", value, index, m);
}
}
}
highlighted_value.extend(value[index..].iter());
highlight_result.insert(attribute.to_string(), Value::String(highlighted_value));
};
}
}
highlight_result
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn aligned_crops() {
let text = r#"En ce début de trentième millénaire, l'Empire n'a jamais été aussi puissant, aussi étendu à travers toute la galaxie. C'est dans sa capitale, Trantor, que l'éminent savant Hari Seldon invente la psychohistoire, une science toute nouvelle, à base de psychologie et de mathématiques, qui lui permet de prédire l'avenir... C'est-à-dire l'effondrement de l'Empire d'ici cinq siècles et au-delà, trente mille années de chaos et de ténèbres. Pour empêcher cette catastrophe et sauver la civilisation, Seldon crée la Fondation."#;
// simple test
let (start, length) = aligned_crop(&text, 6, 2);
let cropped = text.chars().skip(start).take(length).collect::<String>().trim().to_string();
assert_eq!("début", cropped);
// first word test
let (start, length) = aligned_crop(&text, 0, 1);
let cropped = text.chars().skip(start).take(length).collect::<String>().trim().to_string();
assert_eq!("En", cropped);
// last word test
let (start, length) = aligned_crop(&text, 510, 2);
let cropped = text.chars().skip(start).take(length).collect::<String>().trim().to_string();
assert_eq!("Fondation", cropped);
// CJK tests
let text = "this isのス foo myタイリ test";
// mixed charset
let (start, length) = aligned_crop(&text, 5, 3);
let cropped = text.chars().skip(start).take(length).collect::<String>().trim().to_string();
assert_eq!("isのス", cropped);
// split regular word / CJK word, no space
let (start, length) = aligned_crop(&text, 7, 1);
let cropped = text.chars().skip(start).take(length).collect::<String>().trim().to_string();
assert_eq!("のス", cropped);
}
#[test]
fn calculate_matches() {
let mut matches = Vec::new();
matches.push(Highlight { attribute: 0, char_index: 0, char_length: 3});
matches.push(Highlight { attribute: 0, char_index: 0, char_length: 2});
let mut attributes_to_retrieve: HashSet<String> = HashSet::new();
attributes_to_retrieve.insert("title".to_string());
let schema = Schema::with_primary_key("title");
let matches_result = super::calculate_matches(matches, Some(attributes_to_retrieve), &schema);
let mut matches_result_expected: HashMap<String, Vec<MatchPosition>> = HashMap::new();
let mut positions = Vec::new();
positions.push(MatchPosition {
start: 0,
length: 2,
});
positions.push(MatchPosition {
start: 0,
length: 3,
});
matches_result_expected.insert("title".to_string(), positions);
assert_eq!(matches_result, matches_result_expected);
}
#[test]
fn calculate_highlights() {
let data = r#"{
"title": "Fondation (Isaac ASIMOV)",
"description": "En ce début de trentième millénaire, l'Empire n'a jamais été aussi puissant, aussi étendu à travers toute la galaxie. C'est dans sa capitale, Trantor, que l'éminent savant Hari Seldon invente la psychohistoire, une science toute nouvelle, à base de psychologie et de mathématiques, qui lui permet de prédire l'avenir... C'est-à-dire l'effondrement de l'Empire d'ici cinq siècles et au-delà, trente mille années de chaos et de ténèbres. Pour empêcher cette catastrophe et sauver la civilisation, Seldon crée la Fondation."
}"#;
let document: IndexMap<String, Value> = serde_json::from_str(data).unwrap();
let mut attributes_to_highlight = HashSet::new();
attributes_to_highlight.insert("title".to_string());
attributes_to_highlight.insert("description".to_string());
let mut matches = HashMap::new();
let mut m = Vec::new();
m.push(MatchPosition {
start: 0,
length: 9,
});
matches.insert("title".to_string(), m);
let mut m = Vec::new();
m.push(MatchPosition {
start: 510,
length: 9,
});
matches.insert("description".to_string(), m);
let result = super::calculate_highlights(&document, &matches, &attributes_to_highlight);
let mut result_expected = IndexMap::new();
result_expected.insert(
"title".to_string(),
Value::String("<em>Fondation</em> (Isaac ASIMOV)".to_string()),
);
result_expected.insert("description".to_string(), Value::String("En ce début de trentième millénaire, l'Empire n'a jamais été aussi puissant, aussi étendu à travers toute la galaxie. C'est dans sa capitale, Trantor, que l'éminent savant Hari Seldon invente la psychohistoire, une science toute nouvelle, à base de psychologie et de mathématiques, qui lui permet de prédire l'avenir... C'est-à-dire l'effondrement de l'Empire d'ici cinq siècles et au-delà, trente mille années de chaos et de ténèbres. Pour empêcher cette catastrophe et sauver la civilisation, Seldon crée la <em>Fondation</em>.".to_string()));
assert_eq!(result, result_expected);
}
#[test]
fn highlight_longest_match() {
let data = r#"{
"title": "Ice"
}"#;
let document: IndexMap<String, Value> = serde_json::from_str(data).unwrap();
let mut attributes_to_highlight = HashSet::new();
attributes_to_highlight.insert("title".to_string());
let mut matches = HashMap::new();
let mut m = Vec::new();
m.push(MatchPosition {
start: 0,
length: 2,
});
m.push(MatchPosition {
start: 0,
length: 3,
});
matches.insert("title".to_string(), m);
let result = super::calculate_highlights(&document, &matches, &attributes_to_highlight);
let mut result_expected = IndexMap::new();
result_expected.insert(
"title".to_string(),
Value::String("<em>Ice</em>".to_string()),
);
assert_eq!(result, result_expected);
}
}

View File

@ -0,0 +1,2 @@
pub mod meilisearch;
pub mod tide;

View File

@ -0,0 +1,83 @@
use crate::error::{ResponseError, SResult};
use crate::Data;
use meilisearch_core::Index;
use tide::Request;
pub enum ACL {
Admin,
Private,
Public,
}
pub trait RequestExt {
fn is_allowed(&self, acl: ACL) -> SResult<()>;
fn url_param(&self, name: &str) -> SResult<String>;
fn index(&self) -> SResult<Index>;
fn document_id(&self) -> SResult<String>;
}
impl RequestExt for Request<Data> {
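/// Checks the X-Meili-API-Key header against the configured keys: the
/// master key grants every ACL, the private key covers Private and Public,
/// and the public key covers Public only. Without a master key the server
/// accepts every request.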
fn is_allowed(&self, acl: ACL) -> SResult<()> {
let user_api_key = self.header("X-Meili-API-Key");
if self.state().api_keys.master.is_none() {
return Ok(())
}
match acl {
ACL::Admin => {
if user_api_key == self.state().api_keys.master.as_deref() {
return Ok(());
}
}
ACL::Private => {
if user_api_key == self.state().api_keys.master.as_deref() {
return Ok(());
}
if user_api_key == self.state().api_keys.private.as_deref() {
return Ok(());
}
}
ACL::Public => {
if user_api_key == self.state().api_keys.master.as_deref() {
return Ok(());
}
if user_api_key == self.state().api_keys.private.as_deref() {
return Ok(());
}
if user_api_key == self.state().api_keys.public.as_deref() {
return Ok(());
}
}
}
Err(ResponseError::InvalidToken(
user_api_key.unwrap_or("Need a token").to_owned(),
))
}
fn url_param(&self, name: &str) -> SResult<String> {
let param = self
.param::<String>(name)
.map_err(|e| ResponseError::bad_parameter(name, e))?;
Ok(param)
}
fn index(&self) -> SResult<Index> {
let index_uid = self.url_param("index")?;
let index = self
.state()
.db
.open_index(&index_uid)
.ok_or(ResponseError::index_not_found(index_uid))?;
Ok(index)
}
fn document_id(&self) -> SResult<String> {
let name = self
.param::<String>("document_id")
.map_err(|_| ResponseError::bad_parameter("documentId", "primaryKey"))?;
Ok(name)
}
}

View File

@ -0,0 +1,10 @@
#![allow(clippy::or_fun_call)]
pub mod data;
pub mod error;
pub mod helpers;
pub mod models;
pub mod option;
pub mod routes;
pub use self::data::Data;

View File

@ -0,0 +1,112 @@
use std::{env, thread};
use async_std::task;
use log::info;
use main_error::MainError;
use structopt::StructOpt;
use tide::middleware::{Cors, RequestLogger, Origin};
use http::header::HeaderValue;
use meilisearch_http::data::Data;
use meilisearch_http::option::Opt;
use meilisearch_http::routes;
use meilisearch_http::routes::index::index_update_callback;
mod analytics;
#[cfg(target_os = "linux")]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
pub fn main() -> Result<(), MainError> {
let opt = Opt::from_args();
match opt.env.as_ref() {
"production" => {
if opt.master_key.is_none() {
return Err(
"In production mode, the environment variable MEILI_MASTER_KEY is mandatory"
.into(),
);
}
env_logger::init();
}
"development" => {
env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init();
}
_ => unreachable!(),
}
if !opt.no_analytics {
thread::spawn(analytics::analytics_sender);
}
let data = Data::new(opt.clone());
let data_cloned = data.clone();
data.db.set_update_callback(Box::new(move |name, status| {
index_update_callback(name, &data_cloned, status);
}));
print_launch_resume(&opt, &data);
let mut app = tide::with_state(data);
app.middleware(Cors::new()
.allow_methods(HeaderValue::from_static("GET, POST, PUT, DELETE, OPTIONS"))
.allow_headers(HeaderValue::from_static("X-Meili-API-Key"))
.allow_origin(Origin::from("*")));
app.middleware(RequestLogger::new());
routes::load_routes(&mut app);
task::block_on(app.listen(opt.http_addr))?;
Ok(())
}
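/// Prints the startup banner and the configuration the server was
/// launched with (database path, environment, build info and API keys).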
pub fn print_launch_resume(opt: &Opt, data: &Data) {
let ascii_name = r#"
888b d888 d8b 888 d8b .d8888b. 888
8888b d8888 Y8P 888 Y8P d88P Y88b 888
88888b.d88888 888 Y88b. 888
888Y88888P888 .d88b. 888 888 888 "Y888b. .d88b. 8888b. 888d888 .d8888b 88888b.
888 Y888P 888 d8P Y8b 888 888 888 "Y88b. d8P Y8b "88b 888P" d88P" 888 "88b
888 Y8P 888 88888888 888 888 888 "888 88888888 .d888888 888 888 888 888
888 " 888 Y8b. 888 888 888 Y88b d88P Y8b. 888 888 888 Y88b. 888 888
888 888 "Y8888 888 888 888 "Y8888P" "Y8888 "Y888888 888 "Y8888P 888 888
"#;
println!("{}", ascii_name);
info!("Database path: {:?}", opt.db_path);
info!("Start server on: {:?}", opt.http_addr);
info!("Environment: {:?}", opt.env);
info!("Commit SHA: {:?}", env!("VERGEN_SHA").to_string());
info!(
"Build date: {:?}",
env!("VERGEN_BUILD_TIMESTAMP").to_string()
);
info!(
"Package version: {:?}",
env!("CARGO_PKG_VERSION").to_string()
);
if let Some(master_key) = &data.api_keys.master {
info!("Master Key: {:?}", master_key);
if let Some(private_key) = &data.api_keys.private {
info!("Private Key: {:?}", private_key);
}
if let Some(public_key) = &data.api_keys.public {
info!("Public Key: {:?}", public_key);
}
} else {
info!("No master key found; The server will have no securities.\
If you need some protection in development mode, please export a key. export MEILI_MASTER_KEY=xxx");
}
info!("If you need extra information; Please refer to the documentation: http://docs.meilisearch.com");
info!("If you want to support us or help us; Please consult our Github repo: http://github.com/meilisearch/meilisearch");
info!("If you want to contact us; Please chat with us on http://meilisearch.com or by email to bonjour@meilisearch.com");
}

View File

@ -0,0 +1 @@
pub mod update_operation;

View File

@ -0,0 +1,33 @@
use std::fmt;
#[allow(dead_code)]
#[derive(Debug)]
pub enum UpdateOperation {
ClearAllDocuments,
DocumentsAddition,
DocumentsDeletion,
SynonymsUpdate,
SynonymsDeletion,
StopWordsAddition,
StopWordsDeletion,
Schema,
Config,
}
impl fmt::Display for UpdateOperation {
fn fmt(&self, f: &mut fmt::Formatter) -> std::fmt::Result {
use UpdateOperation::*;
match self {
ClearAllDocuments => write!(f, "ClearAllDocuments"),
DocumentsAddition => write!(f, "DocumentsAddition"),
DocumentsDeletion => write!(f, "DocumentsDeletion"),
SynonymsUpdate => write!(f, "SynonymsUpdate"),
SynonymsDeletion => write!(f, "SynonymsDelettion"),
StopWordsAddition => write!(f, "StopWordsAddition"),
StopWordsDeletion => write!(f, "StopWordsDeletion"),
Schema => write!(f, "Schema"),
Config => write!(f, "Config"),
}
}
}

Some files were not shown because too many files have changed in this diff.