Compare commits


202 Commits

Author SHA1 Message Date
45c947b1fe Duplicate an index 2025-06-17 21:16:38 +02:00
2616d776f2 Merge pull request #5677 from martin-g/faster-documents-errors-it-tests
tests: Faster document::errors IT tests
2025-06-17 15:53:35 +00:00
3004db95af Merge pull request #5680 from martin-g/faster-similar-mod-it-tests
tests: Faster similar::mod IT tests
2025-06-17 15:51:38 +00:00
9a729bf31d Merge pull request #5682 from martin-g/faster-documents-update_documents-it-tests
tests: Faster documents::update_documents IT tests
2025-06-17 14:36:09 +00:00
8bfa6a7f54 tests: Faster documents::update_documents IT tests
Use a shared server + unique index

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-16 23:48:59 +03:00
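This "shared server + unique index" pattern recurs throughout the compare. As a rough sketch, a converted test looks like the following (helper names are taken from the `tests/common` diffs at the bottom of this page; the test body and imports are assumed):

```rust
use crate::common::Server;
use crate::json;

#[actix_rt::test]
async fn update_documents_on_unique_index() {
    // One Meilisearch instance is shared by the whole test binary...
    let server = Server::new_shared();
    // ...while each test works on its own uniquely named index.
    let index = server.unique_index_with_prefix("update-documents");
    let (task, _code) = index.add_documents(json!([{ "id": 1 }]), None).await;
    server.wait_task(task.uid()).await.succeeded();
}
```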
fe9866aca8 tests: Faster similar::mod IT tests
Use shared server + unique indexes

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-16 22:51:07 +03:00
60f105a4a3 tests: Faster document::errors IT tests
* Add a call to .failed() for an awaited task
* Use Server::wait_task() instead of Index::wait_task() - it has better
  error checking

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-16 16:25:15 +03:00
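Continuing the sketch above, the two changes this commit describes look roughly like this (the payload is purely illustrative):

```rust
// Server::wait_task() checks errors more strictly than the Index::wait_task()
// helper it replaces, and .failed() panics if the task unexpectedly succeeds.
let (task, _code) = index.add_documents(json!("not a documents payload"), None).await;
server.wait_task(task.uid()).await.failed();
```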
abb399b802 Merge pull request #5674 from meilisearch/release-v1.15.2
Bring back v1.15.2 to main
2025-06-16 11:36:07 +00:00
aeaac7270e Merge pull request #5603 from martin-g/faster-search-multi-it-tests
tests: Faster search::multi IT tests
2025-06-16 09:43:24 +00:00
f45770a3ce Merge pull request #5672 from martin-g/reuse-bench-data
docs: Recommend using a custom path for the benches' data
2025-06-16 09:35:57 +00:00
0e10ff1aa3 docs: Recommend using a custom path for the benches' data
This reduces the build time of the `benchmarks` crate from ~220secs to
45secs (according to `cargo build --timings`) on my dev machine

Additionally I've introduced a parent folder for the Meili related cache
paths - ~/.cache/meili

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-16 09:21:47 +03:00
6ee608c2d1 Remove debug leftovers
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-14 15:45:04 +03:00
95e8a9bef1 Use a unique name for an index in a shared server
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-14 15:10:48 +03:00
0598320252 Try to debug the problem with the existing "test" index in a shared server
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-14 14:07:57 +03:00
2269104337 Use unique_index_with_prefix() instead of composing the index names manually with Uuid
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-14 13:35:03 +03:00
e8774ad079 Extract shared indices for movies and batman documents
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-12 13:46:17 +03:00
c3368e6859 Merge pull request #5659 from meilisearch/tmp-release-v1.15.1
Bring back v1.15.0 and v1.15.1 changes
2025-06-12 09:16:56 +00:00
9bda9a9a64 Merge remote-tracking branch 'origin/main' into tmp-release-v1.15.1 2025-06-12 10:21:07 +02:00
aefebdeb8b Merge pull request #5617 from workbackai/workback/patch/5594/FB6ED899-E821-4C88-AA79-8BB975E1937A
fix(milli/search): Cyrillic has different typo tolerance due to byte counting bug
2025-06-12 07:39:19 +00:00
646e44ddf9 Re-use the shared_index_with_score_documents since the settings are as the default
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-12 08:59:19 +03:00
b8845d1015 Sort the imports
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-11 11:29:33 +03:00
620867d611 Use unique indices for the searches in non-existing indices
By using a hardcoded name there is a chance that the index could already exist

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-11 11:01:05 +03:00
a73d3c03e9 Make the dynamic assertion for the facetsByIndex JSON key broader
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-11 09:10:10 +03:00
824f5b12ce Formatting
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-11 08:54:58 +03:00
bb4baf7fae Remove useless dynamic redactions. They are covered by their .**.xyz counterparts
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-11 08:52:28 +03:00
0263eb0aec More assertion fixes
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-11 08:42:35 +03:00
8a916a4e42 More assertion fixes
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-11 07:54:04 +03:00
6a683975bf More fixes of the tests
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 16:58:48 +03:00
1824fbd1b5 Introduce Index::unique_index_with_prefix(&str)
It could be used when we want to see the index name in the assertions,
e.g. `movies-[uuid]`

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 14:49:18 +03:00
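A sketch of a call site for the new helper (its implementation appears in the `Server` diff near the bottom of this page):

```rust
let server = Server::new_shared();
// The uid comes out as "movies-<uuid v4>": assertions keep a readable
// prefix while the suffix keeps the index unique across tests.
let index = server.unique_index_with_prefix("movies");
```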
34d8a54c4b Fix typos in comments and update assertions
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 14:48:59 +03:00
8fa6e8670a tests: Faster search::multi IT tests
Use shared server + unique indices where possible

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-10 14:10:43 +03:00
170ad87e44 Merge pull request #5622 from martin-g/faster-search-filters-it-tests
tests: Faster search::filters IT tests
2025-06-10 08:17:52 +00:00
8f96724adf Set max_attempts to 400 for Server::wait_task()
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-06-09 14:03:49 +03:00
01e5b0effa Merge pull request #5611 from martin-g/faster-stats-mod-it-tests
tests: Faster stats::mod IT tests
2025-06-09 11:02:12 +00:00
2ec9664878 chore: Fix English grammar in SearchQueue's comments
No functional changes!

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-09 12:05:36 +02:00
10028515ac Use a unique server for the summarized dump creation test
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:52:05 +03:00
63ccd19ab1 Use Server::wait_task() instead of Index::wait_task() for tasks IT tests
Revert the debugging helper that dumped the thread stack traces.
Try with 400 max attempts for the task success/failure (200 secs)

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:16:50 +03:00
1b4d344e18 Increase the wait time in the tests
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:13:32 +03:00
89c0cf9b12 temporary: Dump the threads stack traces when .wait_task() times out
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:13:32 +03:00
3770e70581 Optimize the imports
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:13:31 +03:00
e497008161 Add cattos to the shared_index_with_nested_documents() as a filterable attribute
This allows making some more search::filters IT tests use a shared
server + unique/shared indices

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:13:31 +03:00
a15ebb283f Remove unused import
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:13:30 +03:00
3f256a7959 Use the shared index with DOCUMENTS where possible
Remove useless assertion that is covered by the earlier call of
.succeeded()

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:13:30 +03:00
b41af0d0f6 Formatting
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:13:30 +03:00
3ebff65ef3 tests: Faster search::filters IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-06 14:13:29 +03:00
666680bd87 test(meilisearch/search/locales.rs): updates snapshot
Used `cargo insta test`
Reviewed with `cargo insta review`
2025-06-04 14:18:20 +01:00
27527849bb test(meilisearch/search/locales.rs): updates snapshot
Used `cargo insta test`
Reviewed with `cargo insta review`
2025-06-04 14:17:10 +01:00
1d02efeab9 Merge pull request #5615 from martin-g/faster-tasks-mod-it-tests
tests: Faster tasks::mod IT tests
2025-06-04 12:38:39 +00:00
53fc98d3b0 Merge pull request #5632 from martin-g/db-change-label
ci: Use `GITHUB_TOKEN` secret for the `db change check` workflow
2025-06-04 12:23:01 +00:00
263300b3a3 style(milli): linting 2025-06-04 12:19:00 +01:00
ab3d92d163 chore(parse_query): delete println and move test inside tests module 2025-06-04 12:19:00 +01:00
ef9fc6c854 fix(parse_query): cyrillic bug 2025-06-04 12:19:00 +01:00
61b0f50d4d Trigger build
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-04 13:37:42 +03:00
0557a4dd2f Trigger build
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-04 13:08:13 +03:00
930d5a09a8 Use unique server + its own index for #stats() test
Using a shared server will make this test fragile

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-04 13:08:13 +03:00
8b0c4291ae tests: Faster stats::mod IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-04 13:08:13 +03:00
c9efdf8c88 Render details.dumpUid as [dump_uid] in Value's Display
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-04 13:00:47 +03:00
72736c0ea9 Merge pull request #5627 from meilisearch/skip_remote_test
ignore flaky test
2025-06-04 08:28:24 +00:00
49317bbee4 Merge pull request #5625 from martin-g/faster-search-hybrid-it-tests
tests: Faster search::hybrid IT tests
2025-06-03 13:54:38 +00:00
af54c8381e Use ${{ github.repository }} instead of hardcoding the repo/owner
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:46:16 +03:00
693fcd5752 Try with GITHUB_TOKEN
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:40:40 +03:00
733175359a Update the new test case to use the new signature of index_with_documents_user_provided()
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:29:45 +03:00
7c6162f0bf Fix clippy error
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:26:21 +03:00
d6ae39bf0f tests: Faster search::hybrid IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:26:21 +03:00
e416bbc1de Merge pull request #5623 from martin-g/faster-search-geo-it-tests
tests: Faster search::geo IT tests
2025-06-03 12:25:48 +00:00
2cfd363dc6 Merge pull request #5619 from martin-g/faster-documents-delete_documents-it-tests
tests: Faster documents::delete_documents IT tests
2025-06-03 12:06:07 +00:00
70aa78a2c2 Remove unused import
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 14:04:15 +03:00
96c81762ed Apply suggestions from code review
Do not use redactions for the snapshot assertions

Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-06-03 14:00:38 +03:00
0b1f634afa Remove useless code
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 13:52:55 +03:00
d3d5015854 Use the cancelled task uid
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 13:50:04 +03:00
f95f29c492 Use unique server+index for list_tasks_type_filtered() test case
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-06-03 13:45:46 +03:00
a50b69b868 Use unique server+index for list_tasks_status_filtered() test case
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-06-03 13:45:17 +03:00
3668f5f021 Use unique server+index for list_tasks() test case
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-06-03 13:44:38 +03:00
54fdf379bb Use shared_does_not_exists_index() index for delete_one_document_unexisting_index() test case
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 13:41:13 +03:00
41b1cd5a73 Extract GEO_DOCUMENTS static variable and shared index with these docs
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 13:08:12 +03:00
5c14a25d5a Merge pull request #5624 from martin-g/faster-documents-get_documents-it-tests
tests: Faster documents::get_documents IT tests
2025-06-03 09:37:07 +00:00
fda2843135 Merge pull request #5621 from martin-g/faster-similar-errors-it-tests
tests: Faster similar::errors IT tests
2025-06-03 09:27:27 +00:00
9347330f3a Merge pull request #5620 from martin-g/faster-search-distinct-it-tests
tests: Faster search::distinct IT tests
2025-06-03 09:24:39 +00:00
56c9190dab Merge pull request #5618 from martin-g/faster-vector-binary_quantized-it-tests
tests: Faster vector::binary_quantized IT tests
2025-06-03 09:20:08 +00:00
6b986dceaf Merge pull request #5607 from martin-g/faster-settings-get_settings-it-tests
tests: Faster settings::get_settings IT tests
2025-06-03 08:53:17 +00:00
ea6bb4df1d Merge pull request #5614 from meilisearch/fix-hybrid-distinct
Fix distinct for hybrid search
2025-06-03 07:20:55 +00:00
a3d2f64725 tests: Faster search::distinct IT tests
Use shared server + unique indices

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 08:23:26 +03:00
d5526cffff Merge pull request #5527 from nnethercott/all-cpus-in-import-dump
Use all CPUs during an import dump
2025-06-02 15:24:59 +00:00
5cb75d1f2a ignore flaky test 2025-06-02 17:06:53 +02:00
921e3c4ffe tests: Faster documents::get_documents IT tests
Use shared server + unique index

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 15:36:08 +03:00
52591761af tests: Faster search::geo IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 15:32:32 +03:00
f80182f0a9 tests: Faster similar::errors IT tests
Use shared server + unique indices

Related to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 15:20:17 +03:00
3b30b6a57a tests: Faster documents::delete_documents IT tests
Use shared server + unique indices
Assert .succeeded()/.failed() for the waited tasks

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 15:04:48 +03:00
5efc78db55 tests: Faster vector::binary_quantized IT tests
Use shared server + unique indices where possible

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 14:47:18 +03:00
cffbe3fcb6 Trigger build
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 14:17:19 +03:00
8d8fcb9846 Revert to unique server + named index for some tests
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 11:44:21 +03:00
20049669c9 Merge pull request #5600 from martin-g/faster-search-facet_search-it-tests
tests: Faster search::facet_search IT tests
2025-06-02 08:39:30 +00:00
db28d13cb1 Remove useless assertion.
.succeeded() does the same

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 10:59:46 +03:00
5a7cfc57fd tests: Faster tasks::mod IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 10:56:43 +03:00
790621dc29 Remove useless assert
Co-authored-by: Many the fish <many@meilisearch.com>
2025-06-02 10:55:28 +03:00
1d577ae98b Merge pull request #5610 from martin-g/faster-settings-tokenizer_customization-it-tests
tests: Faster settings::tokenizer_customization IT tests
2025-06-02 07:09:41 +00:00
88e9a55d44 Merge pull request #5609 from martin-g/faster-settings-proximity_settings-it-tests
tests: Faster settings::proximity_settings IT tests
2025-06-02 07:09:06 +00:00
dbe551cf99 Merge pull request #5606 from martin-g/faster-settings-distinct-it-tests
tests: Faster settings::distinct IT tests
2025-06-02 07:07:23 +00:00
a299fbd33b Merge pull request #5605 from martin-g/faster-search-restricted_searchable-it-tests
tests: Faster search::restricted_searchable IT tests
2025-06-02 07:06:50 +00:00
193119acb9 Merge pull request #5604 from martin-g/search-pagination-it-tests
tests: search::pagination IT tests
2025-06-02 07:05:52 +00:00
4c71118699 Merge pull request #5602 from martin-g/faster-search-matching_strategy-it-tests
tests: Faster search::matching_strategy IT tests
2025-06-02 07:04:43 +00:00
5fe2943d3c Merge pull request #5601 from martin-g/faster-search-locales-it-tests
tests: Faster search::locales IT tests
2025-06-02 07:02:28 +00:00
86ff502327 Merge pull request #5599 from martin-g/faster-index-search-errors-tests
tests: Faster search::errors IT tests
2025-06-02 06:54:32 +00:00
6b1a345dce tests: Faster settings::tokenizer_customization IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 08:23:09 +03:00
b54ece690b tests: Faster settings::proximity_settings IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 08:20:05 +03:00
3ea167bade tests: Faster settings::get_settings IT tests
Use shared server + unique indices

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-30 16:33:27 +03:00
1158d6689f tests: Faster settings::distinct IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-30 15:41:31 +03:00
d9b0463a0b tests: Faster search::restricted_searchable IT tests
Use shared server + unique indices

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-30 15:37:27 +03:00
ae9899f179 tests: search::pagination IT tests
Minor cleanup.

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-30 15:26:55 +03:00
308fd7128e Fix clippy errors
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 11:36:56 +03:00
27e7c00622 Add dynamic redactions for taskUid and enqueuedAt properties
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 11:33:10 +03:00
58207da934 Trigger build
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 10:56:33 +03:00
fb8b832192 Trigger build
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 10:54:31 +03:00
17207b5405 tests: Faster search::matching_strategy IT tests
Use shared server + unique indices for all tests

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 09:09:02 +03:00
bd95503eba tests: Faster search::locales IT tests
Use a shared server + unique indices where possible

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 09:03:23 +03:00
8b8b0d802c tests: Faster search::facet_search IT tests
Use shared server + unique indices where possible.
Assert .succeeded() for the waited tasks.
Drop usage of dbg!() in the assertions. It caused noise in the logs

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 08:53:10 +03:00
d329e86250 tests: Use shared server + unique indices where possible
Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 08:42:10 +03:00
d416b3b390 Merge pull request #5592 from nnethercott/extract-geo-facets-seperately
Decouple geo facet extraction from rest of document
2025-05-28 16:22:10 +00:00
54f5e74744 Support distinct in hybrid search 2025-05-28 17:58:58 +02:00
fd4b192a39 Add distinct_fid function and expose distinct_single_docid 2025-05-28 17:58:58 +02:00
3c13feebf7 Test that distinct is applied for hybrid search 2025-05-28 17:58:58 +02:00
1811168b96 remove duplicated check on geo field changes 2025-05-28 15:45:13 +02:00
b06cc1e0a2 Update crates/milli/src/update/new/extract/faceted/extract_facets.rs
Co-authored-by: Many the fish <many@meilisearch.com>
2025-05-28 15:38:23 +02:00
44f812c36d Update crates/milli/src/update/new/extract/faceted/extract_facets.rs
Co-authored-by: Many the fish <many@meilisearch.com>
2025-05-28 15:38:12 +02:00
c8e77b5f25 Merge pull request #5574 from martin-g/faster-add_documents-it-tests
perf: Faster integration tests for add_documents.rs
2025-05-28 13:13:38 +00:00
283f516e15 Merge pull request #5579 from martin-g/faster-index-update_index-it-tests
perf: Faster index::update_index IT tests
2025-05-28 13:11:56 +00:00
b4ca0a8c98 Update the tests related to updating indices
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:02:41 +03:00
b658e38acd Fix formatting
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:02:41 +03:00
f87e46cc16 Ignore the result from #wait_task()
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:02:41 +03:00
65354b414a Update crates/meilisearch/tests/index/update_index.rs
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-05-28 15:02:40 +03:00
025df397c0 Update crates/meilisearch/tests/index/update_index.rs
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-05-28 15:02:40 +03:00
f77abc9dc8 Update crates/meilisearch/tests/index/update_index.rs
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-05-28 15:02:40 +03:00
7e9909ee45 perf: Faster index::update_index IT tests
Use a shared server where possible.
Assert succeeded/failed task waits.

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:02:40 +03:00
43ec97fe45 format the code
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:01:04 +03:00
02929e241b Update the status code
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:36:13 +03:00
c13efde042 uuid is a production dependency of meili-snap
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:35:50 +03:00
36f0a1492c Apply suggestions from code review
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-05-28 14:22:04 +03:00
ce65ad213b Add dynamic redactions for uid, batchUid and taskUid
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:22:04 +03:00
3e0de6cb83 Wait for the batched tasks by their real uid.
Some of them succeed, others fail.

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:22:04 +03:00
f3d691667d Use a Regex in insta dynamic redaction to replace Uuids with [uuid]
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:22:01 +03:00
ce9c930d10 Fix clippy and fmt
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:21:25 +03:00
fc88b003b4 Use shared server and unique indices for add_documents IT tests
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:20:07 +03:00
cf5d26124a Call .succeeded() or .failed() on the waited task
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:18:34 +03:00
38b1c57fa8 Faster IT tests for add_documents.rs
Use Shared server where possible

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:18:33 +03:00
25c525b057 Merge pull request #5589 from mcmah309/typo_fix
Typo fix
2025-05-28 11:02:22 +00:00
83cd28b60b Merge pull request #5584 from martin-g/faster-index-search-mod-tests
tests: Faster index::search::mod IT tests
2025-05-28 08:40:37 +00:00
48cad4132a Fix clippy - ignore code variable
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-27 16:44:57 +03:00
4897ad99d0 Wait for the add_documents task
Format the code

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-27 14:26:29 +03:00
46ff78b4ec Update the regex to replace all occurrences of uuids in the redaction
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-27 11:47:02 +03:00
9ad43b6841 rename has_changed to has_changed_for_facets 2025-05-26 18:37:20 +02:00
c9ec502ed9 refactor for readability 2025-05-26 18:32:59 +02:00
18aed75d3b fix logic 2025-05-26 18:20:55 +02:00
6738a4f6ee feat: update the insta snapshots 2025-05-26 16:36:36 +02:00
d2948adea3 Migrate more tests to assert with "[uuid]" instead of real Uuid
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-26 14:31:58 +03:00
f54b57e5be Use a Regex in insta dynamic redaction to replace Uuids with [uuid]
(cherry picked from commit f8b8c6ab71a28052cf9b271ca8aa5d4175f9e8f9)
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-26 14:03:48 +03:00
95821d0bde refactor: update macro 2025-05-26 10:07:13 +02:00
f690fa0686 feat: add macro_rules to factorize 2025-05-26 09:46:14 +02:00
24e94b28c1 feat: uncouple geo extraction from full doc 2025-05-26 09:22:20 +02:00
34d58f35c8 Print [uuid] instead of the Uuid index name for MeilisearchHttpError::Milli errors
This way the tests' assertions/snapshots for unique indices would be stable

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-25 15:48:55 +03:00
1d5265caf4 Fix typo in method name 2025-05-22 14:25:04 +00:00
97aeb6db4d Merge pull request #5548 from lblack00/attributes-to-search-on-nested-fields
Added support for nested wildcards to attributes_to_search_on
2025-05-22 13:58:23 +00:00
f888f87635 Updated formatting using RustFmt 2025-05-21 02:07:25 -07:00
8c8d98eeaa Use shared server and unique indices for all tests where possible
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-21 10:48:20 +03:00
c5ae43cac6 Updated all additional test cases 2025-05-20 09:03:26 -07:00
57eecd6197 Remove an empty line
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-20 14:37:45 +03:00
2fe5c78cb6 tests: Faster index::search::mod IT tests
* Use shared index where possible.
* Call .succeeded/.failed when waiting for a task.
* Use newer format_args syntax
* Do not use fully qualified name for meili_snap:: functions. The
  functions are already imported in scope

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-20 14:26:26 +03:00
8047cfe438 Merge pull request #5580 from martin-g/better-assertions-index-delete_index-it-tests
tests: Assert succeeded/failed for the index::delete_index IT tests
2025-05-20 08:49:24 +00:00
5717e5c1af Merge pull request #5578 from martin-g/faster-index-get_index-it-tests
perf: Faster index::get_index IT tests
2025-05-20 08:41:11 +00:00
bb07038c31 tests: Assert succeeded/failed for the index::delete_index IT tests
Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-19 16:57:53 +03:00
d1a088ea0b Format the code
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-19 16:52:43 +03:00
b68e22c0e6 Revert the improvements for get_and_paginate_indexes()
Because they won't work in multi-threaded execution of the tests

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-19 16:36:45 +03:00
03a36f116e 1. Use a unique Server for no_index_return_empty_list test
... because a Shared one could see indices created by other tests

2. List at least 1000 indices to make sure we get the newly created ones
   in list_multiple_indexes()

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-19 16:20:16 +03:00
8a0bf24ed5 Merge pull request #5572 from martin-g/faster-stats-it-tests
perf: Faster IT tests - stats.rs
2025-05-19 12:44:08 +00:00
e2763471e5 Faster index::get_index IT tests
Use shared server for all tests in get_index.rs

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-19 15:36:25 +03:00
b2f2c5d69f Remove an assertion of a task uid.
It differs for every run of the IT test suite.

Format the imports

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-19 14:44:08 +03:00
1594c54e23 Provide more information about resulting documents on test case 2025-05-19 02:37:23 -07:00
13b607bd68 Removed matches_wildcard_pattern() and integrated match_pattern() into attributes_to_search_on(), updated test cases 2025-05-18 20:24:52 -07:00
3d130d31c8 Do not hard code the non-existing index name/uid
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-16 15:49:50 +03:00
4cda584b0c Fix the build of stats.rs
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-16 15:45:25 +03:00
248c90bad5 removing .await 2025-05-16 15:29:24 +03:00
0e9040e605 remove warnings 2025-05-16 15:29:23 +03:00
3e3c00f44c fix for test failure 2025-05-16 15:29:23 +03:00
d986a3bbaf Changes to index and expected_response as per feedback 2025-05-16 15:29:22 +03:00
c2ceb8e41b Improve Integration tests in the file stats.rs 2025-05-16 15:29:18 +03:00
79db2e67fb refactor: prefer helper over explicit pool construction
Co-authored-by: Many the fish <many@meilisearch.com>
2025-05-15 11:24:34 +02:00
865f24cfef refactor: helper methods for pool and max threads 2025-05-14 23:45:24 +02:00
3fbe1df770 Updated nested_search_all_details_with_deep_wildcard() to test deeply nested attributes 2025-05-14 00:18:30 -07:00
150d1db86b Implemented integration tests for restrict_searchable.rs on nested wildcard attributes 2025-05-13 21:44:24 -07:00
806e983aa5 fix: lazy computation in thread default
Co-authored-by: Martin Grigorov <martin-g@users.noreply.github.com>
2025-05-13 14:14:48 +02:00
e96c1d4b0f style: change fmt from empty str to "unlimited" 2025-05-13 12:16:34 +02:00
15cdc6924b refactor: remove runtime cfg!(test) check
It won't work in integration tests, and consequently all threads would be
used. To remedy this we explicitly set `max_threads=Some(1)` in
`IndexerConfig::default`
2025-05-13 09:18:19 +02:00
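Equivalently, as a sketch (struct-update syntax as used elsewhere in this compare; the remaining fields are assumed to keep their defaults):

```rust
// The test configuration pins indexing to one thread explicitly
// instead of branching on cfg!(test) at runtime.
let config = IndexerConfig { max_threads: Some(1), ..Default::default() };
```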
677e8b122c Merge pull request #5551 from meilisearch/dont-intern-without-typo
Only intern in case of single-typo when looking for single typos
2025-05-12 20:23:39 +00:00
75a7e40a27 Merge branch 'main' into all-cpus-in-import-dump 2025-05-12 21:48:12 +02:00
c8939944c6 Add test 2025-05-12 12:40:55 +02:00
4e6252fb03 Only intern in case of single-typo when looking for single typos 2025-05-12 11:59:21 +02:00
8bd8e744f3 Attributes to search on supports nested wildcards 2025-05-09 02:42:48 -07:00
53f32a7dd7 refactor: change thread_pool from Option<ThreadPoolNoAbort> to
ThreadPoolNoAbort
2025-05-07 17:00:08 +02:00
47a7ed93d3 feat: Make MaxThreads None by default 2025-05-06 09:11:55 +02:00
2ac826edca Apply suggested changes
Co-authored-by: Clément Renault <renault.cle@gmail.com>

Update crates/meilisearch/src/lib.rs

Co-authored-by: Clément Renault <renault.cle@gmail.com>
2025-05-01 16:12:06 +02:00
89aff2081c Fix clippy warnings 2025-04-30 14:17:32 +02:00
3b773b3416 Revert thread_pool type back to Option in config 2025-04-28 11:56:37 +02:00
648b2876f6 Create temp threadpool with all CPUs in dump 2025-04-27 00:52:10 +02:00
67 changed files with 3577 additions and 3078 deletions


@@ -4,22 +4,22 @@ on:
pull_request:
types: [opened, synchronize, reopened, labeled, unlabeled]
env:
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
jobs:
check-labels:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Check db change labels
id: check_labels
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
URL=/repos/meilisearch/meilisearch/pulls/${{ github.event.pull_request.number }}/labels
echo ${{ github.event.pull_request.number }}
echo $URL
LABELS=$(gh api -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" /repos/meilisearch/meilisearch/issues/${{ github.event.pull_request.number }}/labels -q .[].name)
LABELS=$(gh api -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" /repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/labels -q .[].name)
echo "Labels: $LABELS"
if [[ ! "$LABELS" =~ "db change" && ! "$LABELS" =~ "no db change" ]]; then
echo "::error::Pull request must contain either the 'db change' or 'no db change' label."
exit 1


@@ -57,9 +57,17 @@ This command will be triggered to each PR as a requirement for merging it.
You can set the `LINDERA_CACHE` environment variable to speed up your successive builds by up to 2 minutes.
It'll store some built artifacts in the directory of your choice.
We recommend using the standard `$HOME/.cache/lindera` directory:
We recommend using the `$HOME/.cache/meili/lindera` directory:
```sh
export LINDERA_CACHE=$HOME/.cache/lindera
export LINDERA_CACHE=$HOME/.cache/meili/lindera
```
You can set the `MILLI_BENCH_DATASETS_PATH` environment variable to further speed up your builds.
It'll store some big files used for the benchmarks in the directory of your choice.
We recommend using the `$HOME/.cache/meili/benches` directory:
```sh
export MILLI_BENCH_DATASETS_PATH=$HOME/.cache/meili/benches
```
Furthermore, you can improve incremental compilation by setting the `MEILI_NO_VERGEN` environment variable.

Cargo.lock (generated)

@@ -3722,6 +3722,8 @@ dependencies = [
"insta",
"md5",
"once_cell",
"regex-lite",
"uuid",
]
[[package]]


@@ -5,7 +5,7 @@ use meilisearch_types::milli::documents::PrimaryKey;
use meilisearch_types::milli::progress::Progress;
use meilisearch_types::milli::update::new::indexer::{self, UpdateByFunction};
use meilisearch_types::milli::update::DocumentAdditionResult;
use meilisearch_types::milli::{self, ChannelCongestion, Filter, ThreadPoolNoAbortBuilder};
use meilisearch_types::milli::{self, ChannelCongestion, Filter};
use meilisearch_types::settings::apply_settings_to_builder;
use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
use meilisearch_types::Index;
@@ -113,18 +113,8 @@ impl IndexScheduler {
}
}
let local_pool;
let indexer_config = self.index_mapper.indexer_config();
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new()
.thread_name(|i| format!("indexing-thread-{i}"))
.build()
.unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
progress.update_progress(DocumentOperationProgress::ComputingDocumentChanges);
let (document_changes, operation_stats, primary_key) = indexer
@@ -266,18 +256,8 @@ impl IndexScheduler {
let mut congestion = None;
if task.error.is_none() {
let local_pool;
let indexer_config = self.index_mapper.indexer_config();
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new()
.thread_name(|i| format!("indexing-thread-{i}"))
.build()
.unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
let candidates_count = candidates.len();
progress.update_progress(DocumentEditionProgress::ComputingDocumentChanges);
@@ -429,18 +409,8 @@ impl IndexScheduler {
let mut congestion = None;
if !tasks.iter().all(|res| res.error.is_some()) {
let local_pool;
let indexer_config = self.index_mapper.indexer_config();
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new()
.thread_name(|i| format!("indexing-thread-{i}"))
.build()
.unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
progress.update_progress(DocumentDeletionProgress::DeleteDocuments);
let mut indexer = indexer::DocumentDeletion::new();
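All three hunks above remove the same fallback: with `IndexerConfig::thread_pool` no longer an `Option`, each call site reduces to borrowing the configured pool. A sketch (the `install` call is an assumption about how the pool is consumed):

```rust
// No lazily built local pool any more; just borrow the configured one.
let indexer_config = self.index_mapper.indexer_config();
let pool = &indexer_config.thread_pool;
pool.install(|| {
    // ... indexing work ...
})
.unwrap();
```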


@@ -15,3 +15,5 @@ license.workspace = true
insta = { version = "=1.39.0", features = ["json", "redactions"] }
md5 = "0.7.0"
once_cell = "1.20"
regex-lite = "0.1.6"
uuid = { version = "1.17.0", features = ["v4"] }


@@ -4,9 +4,16 @@ use std::path::{Path, PathBuf};
use std::sync::Mutex;
pub use insta;
use insta::internals::{Content, ContentPath};
use once_cell::sync::Lazy;
use regex_lite::Regex;
static SNAPSHOT_NAMES: Lazy<Mutex<HashMap<PathBuf, usize>>> = Lazy::new(Mutex::default);
/// A regex to match UUIDs in messages, specifically looking for the UUID v4 format
static UUID_IN_MESSAGE_RE: Lazy<Regex> = Lazy::new(|| {
Regex::new(r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}")
.unwrap()
});
/// Return the md5 hash of the given string
pub fn hash_snapshot(snap: &str) -> String {
@@ -26,6 +33,39 @@ pub fn default_snapshot_settings_for_test<'a>(
let filename = path.file_name().unwrap().to_str().unwrap();
settings.set_omit_expression(true);
fn uuid_in_message_redaction(content: Content, _content_path: ContentPath) -> Content {
match &content {
Content::String(s) => {
let uuid_replaced = UUID_IN_MESSAGE_RE.replace_all(s, "[uuid]");
Content::String(uuid_replaced.to_string())
}
_ => content,
}
}
fn uuid_in_json_key_redaction(content: Content, _content_path: ContentPath) -> Content {
match content {
Content::Map(map) => {
let new_map = map
.iter()
.map(|(key, value)| match key {
Content::String(s) => {
let uuid_replaced = UUID_IN_MESSAGE_RE.replace_all(s, "[uuid]");
(Content::String(uuid_replaced.to_string()), value.clone())
}
_ => (key.clone(), value.clone()),
})
.collect();
Content::Map(new_map)
}
_ => content,
}
}
settings.add_dynamic_redaction(".**.message", uuid_in_message_redaction);
settings.add_dynamic_redaction(".**.indexUid", uuid_in_message_redaction);
settings.add_dynamic_redaction(".**.facetsByIndex", uuid_in_json_key_redaction);
let test_name = test_name.strip_suffix("::{{closure}}").unwrap_or(test_name);
let test_name = test_name.rsplit("::").next().unwrap().to_owned();
@@ -232,6 +272,9 @@ macro_rules! json_string {
#[cfg(test)]
mod tests {
use crate as meili_snap;
use crate::UUID_IN_MESSAGE_RE;
use uuid::Uuid;
#[test]
fn snap() {
snapshot_hash!(10, @"d3d9446802a44259755d38e6d163e820");
@@ -279,4 +322,14 @@ mod tests {
// snapshot_hash!("", name: "", @"d41d8cd98f00b204e9800998ecf8427e");
}
}
#[test]
fn uuid_in_message_regex() {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
let uuid3 = Uuid::new_v4();
let to_replace = format!("1 {uuid1} 2 {uuid2} 3 {uuid3} 4");
let replaced = UUID_IN_MESSAGE_RE.replace_all(to_replace.as_str(), "[uuid]");
assert_eq!(replaced, "1 [uuid] 2 [uuid] 3 [uuid] 4");
}
}
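With these redactions registered, snapshots taken against uniquely named indices stay stable. A hedged example of the resulting assertion shape (the response body is invented for illustration, mirroring the error examples elsewhere in this compare):

```rust
// Any UUID v4 inside `.message` or `.indexUid` is rewritten to "[uuid]"
// before comparison, so a unique index uid still matches the stored snapshot.
snapshot!(json_string!(response), @r###"
{
  "message": "Index `movies-[uuid]` not found.",
  "code": "index_not_found",
  "type": "invalid_request",
  "link": "https://docs.meilisearch.com/errors#index_not_found"
}
"###);
```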


@@ -697,7 +697,7 @@ pub fn apply_settings_to_builder(
match typo_tolerance {
Setting::Set(ref value) => {
match value.enabled {
Setting::Set(val) => builder.set_autorize_typos(val),
Setting::Set(val) => builder.set_authorize_typos(val),
Setting::Reset => builder.reset_authorize_typos(),
Setting::NotSet => (),
}


@@ -120,7 +120,7 @@ actix-web-lab = { version = "0.24.1", default-features = false }
actix-rt = "2.10.0"
brotli = "6.0.0"
# fixed version due to format breakages in v1.40
insta = "=1.39.0"
insta = { version = "=1.39.0", features = ["redactions"] }
manifest-dir-macros = "0.1.18"
maplit = "1.0.2"
meili-snap = { path = "../meili-snap" }


@@ -37,7 +37,9 @@ use index_scheduler::{IndexScheduler, IndexSchedulerOptions};
use meilisearch_auth::{open_auth_store_env, AuthController};
use meilisearch_types::milli::constants::VERSION_MAJOR;
use meilisearch_types::milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
use meilisearch_types::milli::update::{IndexDocumentsConfig, IndexDocumentsMethod};
use meilisearch_types::milli::update::{
default_thread_pool_and_threads, IndexDocumentsConfig, IndexDocumentsMethod, IndexerConfig,
};
use meilisearch_types::settings::apply_settings_to_builder;
use meilisearch_types::tasks::KindWithContent;
use meilisearch_types::versioning::{
@@ -501,7 +503,19 @@ fn import_dump(
let network = dump_reader.network()?.cloned().unwrap_or_default();
index_scheduler.put_network(network)?;
let indexer_config = index_scheduler.indexer_config();
// 3.1 Use all cpus to process dump if `max_indexing_threads` not configured
let backup_config;
let base_config = index_scheduler.indexer_config();
let indexer_config = if base_config.max_threads.is_none() {
let (thread_pool, _) = default_thread_pool_and_threads();
let _config = IndexerConfig { thread_pool, ..*base_config };
backup_config = _config;
&backup_config
} else {
base_config
};
// /!\ The tasks must be imported AFTER importing the indexes or else the scheduler might
// try to process tasks while we're trying to import the indexes.


@@ -761,10 +761,12 @@ impl IndexerOpts {
max_indexing_memory.to_string(),
);
}
export_to_env_if_not_present(
MEILI_MAX_INDEXING_THREADS,
max_indexing_threads.0.to_string(),
);
if let Some(max_indexing_threads) = max_indexing_threads.0 {
export_to_env_if_not_present(
MEILI_MAX_INDEXING_THREADS,
max_indexing_threads.to_string(),
);
}
}
}
@@ -772,15 +774,15 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
type Error = anyhow::Error;
fn try_from(other: &IndexerOpts) -> Result<Self, Self::Error> {
let thread_pool = ThreadPoolNoAbortBuilder::new()
.thread_name(|index| format!("indexing-thread:{index}"))
.num_threads(*other.max_indexing_threads)
let thread_pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.num_threads(other.max_indexing_threads.unwrap_or_else(|| num_cpus::get() / 2))
.build()?;
Ok(Self {
thread_pool,
log_every_n: Some(DEFAULT_LOG_EVERY_N),
max_memory: other.max_indexing_memory.map(|b| b.as_u64() as usize),
thread_pool: Some(thread_pool),
max_threads: *other.max_indexing_threads,
max_positions_per_attributes: None,
skip_index_budget: other.skip_index_budget,
..Default::default()
@@ -843,31 +845,31 @@ fn total_memory_bytes() -> Option<u64> {
}
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize)]
pub struct MaxThreads(usize);
#[derive(Default, Debug, Clone, Copy, Deserialize, Serialize)]
pub struct MaxThreads(Option<usize>);
impl FromStr for MaxThreads {
type Err = ParseIntError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
usize::from_str(s).map(Self)
}
}
impl Default for MaxThreads {
fn default() -> Self {
MaxThreads(num_cpus::get() / 2)
fn from_str(s: &str) -> Result<MaxThreads, Self::Err> {
if s.is_empty() || s == "unlimited" {
return Ok(MaxThreads::default());
}
usize::from_str(s).map(Some).map(MaxThreads)
}
}
impl fmt::Display for MaxThreads {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
match self.0 {
Some(threads) => write!(f, "{}", threads),
None => write!(f, "unlimited"),
}
}
}
impl Deref for MaxThreads {
type Target = usize;
type Target = Option<usize>;
fn deref(&self) -> &Self::Target {
&self.0
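A quick sketch of what the reworked `FromStr` and `Display` impls accept and print (assertions derived directly from the code above):

```rust
use std::str::FromStr;

// "" and "unlimited" now both mean "no explicit limit" (None)...
assert_eq!(MaxThreads::from_str("unlimited").unwrap().to_string(), "unlimited");
assert_eq!(MaxThreads::from_str("").unwrap().to_string(), "unlimited");
// ...while a plain number still pins the thread count.
assert_eq!(MaxThreads::from_str("8").unwrap().to_string(), "8");
```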


@@ -0,0 +1,242 @@
use actix_web::web::{self, Data};
use actix_web::{HttpRequest, HttpResponse};
use deserr::actix_web::{AwebJson, AwebQueryParameter};
use index_scheduler::IndexScheduler;
use meilisearch_types::deserr::query_params::Param;
use meilisearch_types::deserr::{DeserrJsonError, DeserrQueryParamError};
use meilisearch_types::error::deserr_codes::*;
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::keys::actions;
use meilisearch_types::serde_cs::vec::CS;
use serde_json::Value;
use tracing::debug;
use utoipa::{IntoParams, OpenApi};
use super::ActionPolicy;
use crate::analytics::Analytics;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::indexes::similar_analytics::{SimilarAggregator, SimilarGET, SimilarPOST};
use crate::search::{
add_search_rules, perform_similar, RankingScoreThresholdSimilar, RetrieveVectors, Route,
SearchKind, SimilarQuery, SimilarResult, DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET,
};
#[derive(OpenApi)]
#[openapi(
paths(similar_get, similar_post),
tags(
(
name = "Duplicate an index",
description = "The /duplicate route clones an index",
external_docs(url = "https://www.meilisearch.com/docs/reference/api/duplicate"),
),
),
)]
pub struct DuplicateApi;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(web::resource("").route(web::post().to(SeqHandler(duplicate))));
}
/// Duplicate an index
#[utoipa::path(
post,
path = "{indexUid}/duplicate",
tag = "Duplicate an index",
security(("Bearer" = ["settings", "documents", "*"])),
params(("indexUid" = String, Path, example = "movies", description = "Index Unique Identifier", nullable = false)),
request_body = DuplicateQuery,
responses(
(status = 200, description = "The documents are returned", body = SimilarResult, content_type = "application/json", example = json!(
{
"hits": [
{
"id": 2770,
"title": "American Pie 2",
"poster": "https://image.tmdb.org/t/p/w1280/q4LNgUnRfltxzp3gf1MAGiK5LhV.jpg",
"overview": "The whole gang are back and as close as ever. They decide to get even closer by spending the summer together at a beach house. They decide to hold the biggest…",
"release_date": 997405200
},
{
"id": 190859,
"title": "American Sniper",
"poster": "https://image.tmdb.org/t/p/w1280/svPHnYE7N5NAGO49dBmRhq0vDQ3.jpg",
"overview": "U.S. Navy SEAL Chris Kyle takes his sole mission—protect his comrades—to heart and becomes one of the most lethal snipers in American history. His pinpoint accuracy not only saves countless lives but also makes him a prime…",
"release_date": 1418256000
}
],
"offset": 0,
"limit": 2,
"estimatedTotalHits": 976,
"processingTimeMs": 35,
"query": "american "
}
)),
(status = 404, description = "Index not found", body = ResponseError, content_type = "application/json", example = json!(
{
"message": "Index `movies` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
}
)),
(status = 401, description = "The authorization header is missing", body = ResponseError, content_type = "application/json", example = json!(
{
"message": "The Authorization header is missing. It must use the bearer authorization method.",
"code": "missing_authorization_header",
"type": "auth",
"link": "https://docs.meilisearch.com/errors#missing_authorization_header"
}
)),
)
)]
pub async fn similar_post(
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
params: AwebJson<DuplicateQuery, DeserrJsonError>,
req: HttpRequest,
analytics: web::Data<Analytics>,
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let query = params.into_inner();
debug!(parameters = ?query, "Similar post");
let mut aggregate = SimilarAggregator::<SimilarPOST>::from_query(&query);
let similar = similar(index_scheduler, index_uid, query).await;
if let Ok(similar) = &similar {
aggregate.succeed(similar);
}
analytics.publish(aggregate, &req);
let similar = similar?;
debug!(returns = ?similar, "Similar post");
Ok(HttpResponse::Ok().json(similar))
}
async fn similar(
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
index_uid: IndexUid,
mut query: SimilarQuery,
) -> Result<SimilarResult, ResponseError> {
let retrieve_vectors = RetrieveVectors::new(query.retrieve_vectors);
// Tenant token search_rules.
if let Some(search_rules) = index_scheduler.filters().get_index_search_rules(&index_uid) {
add_search_rules(&mut query.filter, search_rules);
}
let index = index_scheduler.index(&index_uid)?;
let (embedder_name, embedder, quantized) = SearchKind::embedder(
&index_scheduler,
index_uid.to_string(),
&index,
&query.embedder,
None,
Route::Similar,
)?;
tokio::task::spawn_blocking(move || {
perform_similar(
&index,
query,
embedder_name,
embedder,
quantized,
retrieve_vectors,
index_scheduler.features(),
)
})
.await?
}
#[derive(Debug, deserr::Deserr, IntoParams)]
#[deserr(error = DeserrQueryParamError, rename_all = camelCase, deny_unknown_fields)]
#[into_params(parameter_in = Query)]
pub struct SimilarQueryGet {
#[deserr(error = DeserrQueryParamError<InvalidSimilarId>)]
#[param(value_type = String)]
id: Param<String>,
#[deserr(default = Param(DEFAULT_SEARCH_OFFSET()), error = DeserrQueryParamError<InvalidSimilarOffset>)]
#[param(value_type = usize, default = DEFAULT_SEARCH_OFFSET)]
offset: Param<usize>,
#[deserr(default = Param(DEFAULT_SEARCH_LIMIT()), error = DeserrQueryParamError<InvalidSimilarLimit>)]
#[param(value_type = usize, default = DEFAULT_SEARCH_LIMIT)]
limit: Param<usize>,
#[deserr(default, error = DeserrQueryParamError<InvalidSimilarAttributesToRetrieve>)]
#[param(value_type = Vec<String>)]
attributes_to_retrieve: Option<CS<String>>,
#[deserr(default, error = DeserrQueryParamError<InvalidSimilarRetrieveVectors>)]
#[param(value_type = bool, default)]
retrieve_vectors: Param<bool>,
#[deserr(default, error = DeserrQueryParamError<InvalidSimilarFilter>)]
filter: Option<String>,
#[deserr(default, error = DeserrQueryParamError<InvalidSimilarShowRankingScore>)]
#[param(value_type = bool, default)]
show_ranking_score: Param<bool>,
#[deserr(default, error = DeserrQueryParamError<InvalidSimilarShowRankingScoreDetails>)]
#[param(value_type = bool, default)]
show_ranking_score_details: Param<bool>,
#[deserr(default, error = DeserrQueryParamError<InvalidSimilarRankingScoreThreshold>, default)]
#[param(value_type = Option<f32>)]
pub ranking_score_threshold: Option<RankingScoreThresholdGet>,
#[deserr(error = DeserrQueryParamError<InvalidSimilarEmbedder>)]
pub embedder: String,
}
#[derive(Debug, Clone, Copy, PartialEq, deserr::Deserr)]
#[deserr(try_from(String) = TryFrom::try_from -> InvalidSimilarRankingScoreThreshold)]
pub struct RankingScoreThresholdGet(RankingScoreThresholdSimilar);
impl std::convert::TryFrom<String> for RankingScoreThresholdGet {
type Error = InvalidSimilarRankingScoreThreshold;
fn try_from(s: String) -> Result<Self, Self::Error> {
let f: f64 = s.parse().map_err(|_| InvalidSimilarRankingScoreThreshold)?;
Ok(RankingScoreThresholdGet(RankingScoreThresholdSimilar::try_from(f)?))
}
}
impl From<SimilarQueryGet> for SimilarQuery {
fn from(
SimilarQueryGet {
id,
offset,
limit,
attributes_to_retrieve,
retrieve_vectors,
filter,
show_ranking_score,
show_ranking_score_details,
embedder,
ranking_score_threshold,
}: SimilarQueryGet,
) -> Self {
let filter = match filter {
Some(f) => match serde_json::from_str(&f) {
Ok(v) => Some(v),
_ => Some(Value::String(f)),
},
None => None,
};
SimilarQuery {
id: serde_json::Value::String(id.0),
offset: offset.0,
limit: limit.0,
filter,
embedder,
attributes_to_retrieve: attributes_to_retrieve.map(|o| o.into_iter().collect()),
retrieve_vectors: retrieve_vectors.0,
show_ranking_score: show_ranking_score.0,
show_ranking_score_details: show_ranking_score_details.0,
ranking_score_threshold: ranking_score_threshold.map(|x| x.0),
}
}
}


@@ -29,6 +29,7 @@ use crate::routes::is_dry_run;
use crate::Opt;
pub mod documents;
pub mod duplicate;
pub mod facet_search;
pub mod search;
mod search_analytics;
@@ -77,7 +78,8 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
.service(web::scope("/search").configure(search::configure))
.service(web::scope("/facet-search").configure(facet_search::configure))
.service(web::scope("/similar").configure(similar::configure))
.service(web::scope("/settings").configure(settings::configure)),
.service(web::scope("/settings").configure(settings::configure))
.service(web::scope("/duplicate").configure(duplicate::configure)),
);
}
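With the scope registered, the handler above becomes reachable under the index routes. A sketch of a test-side call (the `post` helper on the test `Service` is an assumption):

```rust
// POST /indexes/{indexUid}/duplicate, as wired up by configure() above.
let (_response, code) = server.service.post("/indexes/movies/duplicate", json!({})).await;
assert_eq!(code, 200);
```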


@@ -1,5 +1,5 @@
//! This file implements a queue of searches to process and the ability to control how many searches can be run in parallel.
//! We need this because we don't want to process more search requests than we have cores.
//! We need this because we don't want to process more search requests than the available CPU cores.
//! That slows down everything and consumes RAM for no reason.
//! The steps to do a search are to get the `SearchQueue` data structure and try to get a search permit.
//! This can fail if the queue is full, and we need to drop your search request to register a new one.
@@ -8,7 +8,7 @@
//!
//! In order to do a search request you should try to get a search permit.
//! Retrieve the `SearchQueue` structure from actix-web (`search_queue: Data<SearchQueue>`)
//! and right before processing the search, calls the `SearchQueue::try_get_search_permit` method: `search_queue.try_get_search_permit().await?;`
//! and right before processing the search, call the `SearchQueue::try_get_search_permit` method: `search_queue.try_get_search_permit().await?;`
//!
//! What is going to happen at this point is that you're going to send a oneshot::Sender over an async mpsc channel.
//! Then, the queue/scheduler is going to either:
@@ -121,12 +121,12 @@ impl SearchQueue {
let mut queue: Vec<oneshot::Sender<Permit>> = Default::default();
let mut rng: StdRng = StdRng::from_entropy();
let mut searches_running: usize = 0;
// By having a capacity of parallelism we ensures that every time a search finish it can release its RAM asap
// By having a capacity of parallelism we ensure that every time a search finish it can release its RAM asap
let (sender, mut search_finished) = mpsc::channel(parallelism.into());
loop {
tokio::select! {
// biased select because we wants to free up space before trying to register new tasks
// biased select because we want to free up space before trying to register new tasks
biased;
_ = search_finished.recv() => {
searches_running = searches_running.saturating_sub(1);
@@ -148,11 +148,11 @@ impl SearchQueue {
if searches_running < usize::from(parallelism) && queue.is_empty() {
searches_running += 1;
// if the search requests die it's not a hard error on our side
// if the search requests die, it's not a hard error on our side
let _ = search_request.send(Permit { sender: sender.clone() });
continue;
} else if capacity == 0 {
// in the very specific case where we have a capacity of zero
// in the very specific case where we have a capacity of zero,
// we must refuse the request straight away without going through
// the queue stuff.
drop(search_request);
@@ -183,7 +183,7 @@
.map_err(|_| MeilisearchHttpError::TooManySearchRequests(self.capacity))?;
// If we've been for more than one minute to get a search permit, it's better to simply
// abort the search request than spending time processing something were the client
// abort the search request than spending time processing something where the client
// most certainly exited or got a timeout a long time ago.
// We may find a better solution in https://github.com/actix/actix-web/issues/3462.
if now.elapsed() > self.time_to_abort {
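The caller-side pattern those comments describe, condensed into a sketch (handler signature assumed):

```rust
async fn guarded_search(search_queue: Data<SearchQueue>) -> Result<(), ResponseError> {
    // Hold the permit for the whole search; dropping it frees the slot for the
    // next queued request, and acquisition fails fast with
    // TooManySearchRequests when the queue is saturated.
    let _permit = search_queue.try_get_search_permit().await?;
    // ... run the actual search here ...
    Ok(())
}
```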


@@ -538,7 +538,7 @@ async fn error_add_api_key_parameters_uid_already_exist() {
let (response, code) = server.add_api_key(content).await;
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r###"
{
"message": "`uid` field value `4bc0887a-0e41-4f3b-935d-0c451dcee9c8` is already an existing API key.",
"message": "`uid` field value `[uuid]` is already an existing API key.",
"code": "api_key_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#api_key_already_exists"


@@ -29,6 +29,10 @@ impl<'a> Index<'a, Owned> {
}
}
pub fn with_encoder(&self, encoder: Encoder) -> Index<'a, Owned> {
Index { uid: self.uid.clone(), service: self.service, encoder, marker: PhantomData }
}
pub async fn load_test_set(&self) -> u64 {
let url = format!("/indexes/{}/documents", urlencode(self.uid.as_ref()));
let (response, code) = self
@@ -290,6 +294,20 @@ impl Index<'_, Shared> {
}
(task, code)
}
pub async fn update_index_fail(&self, primary_key: Option<&str>) -> (Value, StatusCode) {
let (mut task, code) = self._update(primary_key).await;
if code.is_success() {
task = self.wait_task(task.uid()).await;
if task.is_success() {
panic!(
"`update_index_fail` succeeded: {}",
serde_json::to_string_pretty(&task).unwrap()
);
}
}
(task, code)
}
}
#[allow(dead_code)]
@@ -333,6 +351,14 @@ impl<State> Index<'_, State> {
self.service.post_encoded("/indexes", body, self.encoder).await
}
pub(super) async fn _update(&self, primary_key: Option<&str>) -> (Value, StatusCode) {
let body = json!({
"primaryKey": primary_key,
});
let url = format!("/indexes/{}", urlencode(self.uid.as_ref()));
self.service.patch_encoded(url, body, self.encoder).await
}
pub(super) async fn _delete(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}", urlencode(self.uid.as_ref()));
self.service.delete(url).await
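A sketch of how the new `update_index_fail` helper reads at a call site (the expected status code is an assumption; the shared index is the one mentioned in the commit log above):

```rust
// The helper waits on the task itself and panics if the update unexpectedly
// succeeds, so the caller only checks that the task was enqueued.
let index = shared_does_not_exists_index().await;
let (_task, code) = index.update_index_fail(Some("id")).await;
assert_eq!(code, 202); // enqueued, then expected to fail
```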


@@ -128,7 +128,8 @@ impl Display for Value {
".finishedAt" => "[date]",
".duration" => "[duration]",
".processingTimeMs" => "[duration]",
".details.embedders.*.url" => "[url]"
".details.embedders.*.url" => "[url]",
".details.dumpUid" => "[dump_uid]",
})
)
}
@@ -264,6 +265,24 @@ pub static SCORE_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
])
});
pub async fn shared_index_with_score_documents() -> &'static Index<'static, Shared> {
static INDEX: OnceCell<Index<'static, Shared>> = OnceCell::const_new();
INDEX.get_or_init(|| async {
let server = Server::new_shared();
let index = server._index("SHARED_SCORE_DOCUMENTS").to_shared();
let documents = SCORE_DOCUMENTS.clone();
let (response, _code) = index._add_documents(documents, None).await;
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
._update_settings(
json!({"filterableAttributes": ["id", "title"], "sortableAttributes": ["id", "title"]}),
)
.await;
index.wait_task(response.uid()).await.succeeded();
index
}).await
}
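// The OnceCell makes this a process-wide singleton: the first caller pays for the
// document load and settings tasks, every later caller gets the same `&'static`
// read-only index. A caller sketch (query value hypothetical; shared indexes must
// not be mutated):
//
//     let index = shared_index_with_score_documents().await;
//     index
//         .search(json!({ "q": "some query", "sort": ["id:asc"] }), |response, code| {
//             assert_eq!(code, 200, "{response}");
//         })
//         .await;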
pub static NESTED_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
@ -333,7 +352,7 @@ pub async fn shared_index_with_nested_documents() -> &'static Index<'static, Sha
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
._update_settings(
json!({"filterableAttributes": ["father", "doggos"], "sortableAttributes": ["doggos"]}),
json!({"filterableAttributes": ["father", "doggos", "cattos"], "sortableAttributes": ["doggos"]}),
)
.await;
index.wait_task(response.uid()).await.succeeded();
@ -435,3 +454,57 @@ pub async fn shared_index_with_test_set() -> &'static Index<'static, Shared> {
})
.await
}
pub static GEO_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
"id": 1,
"name": "Taco Truck",
"address": "444 Salsa Street, Burritoville",
"type": "Mexican",
"rating": 9,
"_geo": {
"lat": 34.0522,
"lng": -118.2437
}
},
{
"id": 2,
"name": "La Bella Italia",
"address": "456 Elm Street, Townsville",
"type": "Italian",
"rating": 9,
"_geo": {
"lat": "45.4777599",
"lng": "9.1967508"
}
},
{
"id": 3,
"name": "Crêpe Truck",
"address": "2 Billig Avenue, Rouenville",
"type": "French",
"rating": 10
}
])
});
pub async fn shared_index_with_geo_documents() -> &'static Index<'static, Shared> {
static INDEX: OnceCell<Index<'static, Shared>> = OnceCell::const_new();
INDEX
.get_or_init(|| async {
let server = Server::new_shared();
let index = server._index("SHARED_GEO_DOCUMENTS").to_shared();
let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None).await;
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
._update_settings(
json!({"filterableAttributes": ["_geo"], "sortableAttributes": ["_geo"]}),
)
.await;
index.wait_task(response.uid()).await.succeeded();
index
})
.await
}
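// Note the fixture shape above: document 2 stores `lat`/`lng` as strings and
// document 3 has no `_geo` field at all, so consumers exercise mixed geo input.
// A read-only caller sketch (coordinates and radius hypothetical):
//
//     let index = shared_index_with_geo_documents().await;
//     index
//         .search(json!({ "filter": "_geoRadius(45.47, 9.19, 10000)" }), |response, code| {
//             assert_eq!(code, 200, "{response}");
//         })
//         .await;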

View File

@ -347,6 +347,16 @@ impl<State> Server<State> {
}
}
pub fn unique_index_with_prefix(&self, prefix: &str) -> Index<'_> {
let uuid = Uuid::new_v4();
Index {
uid: format!("{prefix}-{}", uuid),
service: &self.service,
encoder: Encoder::Plain,
marker: PhantomData,
}
}
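// Sketch: the prefix keeps failing tests greppable while the uuid suffix keeps
// uids collision-free on the shared server.
//
//     let index = server.unique_index_with_prefix("movies");
//     assert!(index.uid.starts_with("movies-")); // e.g. "movies-550e8400-e29b-..."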
pub fn unique_index_with_encoder(&self, encoder: Encoder) -> Index<'_> {
let uuid = Uuid::new_v4();
Index { uid: uuid.to_string(), service: &self.service, encoder, marker: PhantomData }
@ -399,18 +409,9 @@ impl<State> Server<State> {
pub async fn wait_task(&self, update_id: u64) -> Value {
// try several times to get status, or panic to not wait forever
let url = format!("/tasks/{}", update_id);
// Increase timeout for vector-related tests
let max_attempts = if url.contains("/tasks/") {
if update_id > 1000 {
400 // 200 seconds for vector tests
} else {
100 // 50 seconds for other tests
}
} else {
100 // 50 seconds for other tests
};
let max_attempts = 400; // 200 seconds total, 0.5s per attempt
for _ in 0..max_attempts {
for i in 0..max_attempts {
let (response, status_code) = self.service.get(&url).await;
assert_eq!(200, status_code, "response: {}", response);
@ -420,6 +421,10 @@ impl<State> Server<State> {
// wait 0.5 second.
sleep(Duration::from_millis(500)).await;
if i == max_attempts - 1 {
dbg!(response);
}
}
panic!("Timeout waiting for update id");
}
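// The flat budget replaces the old update_id-based heuristic: every task now gets
// 400 attempts at 500 ms each. A quick check of that arithmetic (sketch):
//
//     const MAX_ATTEMPTS: u64 = 400;
//     const POLL_MS: u64 = 500;
//     assert_eq!(MAX_ATTEMPTS * POLL_MS, 200_000); // 200 seconds, as the comment says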

File diff suppressed because it is too large

View File

@ -1,39 +1,35 @@
use meili_snap::{json_string, snapshot};
use crate::common::{GetAllDocumentsOptions, Server};
use crate::common::{shared_does_not_exists_index, GetAllDocumentsOptions, Server};
use crate::json;
#[actix_rt::test]
async fn delete_one_document_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let (task, code) = index.delete_document(0).await;
let index = shared_does_not_exists_index().await;
let (task, code) = index.delete_document_by_filter_fail(json!({"filter": "a = b"})).await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "failed");
index.wait_task(task.uid()).await.failed();
}
#[actix_rt::test]
async fn delete_one_unexisting_document() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index.create(None).await;
let (response, code) = index.delete_document(0).await;
assert_eq!(code, 202, "{}", response);
let update = index.wait_task(response.uid()).await;
assert_eq!(update["status"], "succeeded");
assert_eq!(code, 202, "{response}");
index.wait_task(response.uid()).await.succeeded();
}
#[actix_rt::test]
async fn delete_one_document() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) =
index.add_documents(json!([{ "id": 0, "content": "foobar" }]), None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, status_code) = server.index("test").delete_document(0).await;
let (task, status_code) = index.delete_document(0).await;
assert_eq!(status_code, 202);
index.wait_task(task.uid()).await.succeeded();
@ -43,20 +39,18 @@ async fn delete_one_document() {
#[actix_rt::test]
async fn clear_all_documents_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.clear_all_documents().await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "failed");
index.wait_task(task.uid()).await.failed();
}
#[actix_rt::test]
async fn clear_all_documents() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index
.add_documents(
json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }]),
@ -67,7 +61,7 @@ async fn clear_all_documents() {
let (task, code) = index.clear_all_documents().await;
assert_eq!(code, 202);
let _update = index.wait_task(task.uid()).await;
let _update = index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert!(response["results"].as_array().unwrap().is_empty());
@ -75,14 +69,14 @@ async fn clear_all_documents() {
#[actix_rt::test]
async fn clear_all_documents_empty_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.clear_all_documents().await;
assert_eq!(code, 202);
let _update = index.wait_task(task.uid()).await;
let _update = index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert!(response["results"].as_array().unwrap().is_empty());
@ -90,33 +84,31 @@ async fn clear_all_documents_empty_index() {
#[actix_rt::test]
async fn error_delete_batch_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.delete_batch(vec![]).await;
let expected_response = json!({
"message": "Index `test` not found.",
"message": format!("Index `{}` not found.", index.uid),
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "failed");
let response = index.wait_task(task.uid()).await.failed();
assert_eq!(response["error"], expected_response);
}
#[actix_rt::test]
async fn delete_batch() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }, { "id": 3, "content": "foobar" }]), Some("id")).await;
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.delete_batch(vec![1, 0]).await;
assert_eq!(code, 202);
let _update = index.wait_task(task.uid()).await;
let _update = index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 1);
@ -125,14 +117,14 @@ async fn delete_batch() {
#[actix_rt::test]
async fn delete_no_document_batch() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }, { "id": 3, "content": "foobar" }]), Some("id")).await;
index.wait_task(task.uid()).await.succeeded();
let (_response, code) = index.delete_batch(vec![]).await;
assert_eq!(code, 202, "{}", _response);
let (response, code) = index.delete_batch(vec![]).await;
assert_eq!(code, 202, "{response}");
let _update = index.wait_task(_response.uid()).await;
let _update = index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 3);
@ -140,8 +132,8 @@ async fn delete_no_document_batch() {
#[actix_rt::test]
async fn delete_document_by_filter() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
index.update_settings_filterable_attributes(json!(["color"])).await;
let (task, _status_code) = index
.add_documents(
@ -178,22 +170,22 @@ async fn delete_document_by_filter() {
let (response, code) =
index.delete_document_by_filter(json!({ "filter": "color = blue"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]" }), @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 2,
"indexUid": "doggo",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "[date]"
}
"###);
let response = index.wait_task(response.uid()).await;
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
let response = index.wait_task(response.uid()).await.succeeded();
snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "doggo",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@ -251,22 +243,22 @@ async fn delete_document_by_filter() {
let (response, code) =
index.delete_document_by_filter(json!({ "filter": "color NOT EXISTS"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"taskUid": 3,
"indexUid": "doggo",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "[date]"
}
"###);
let response = index.wait_task(response.uid()).await;
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
let response = index.wait_task(response.uid()).await.succeeded();
snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 3,
"batchUid": 3,
"indexUid": "doggo",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@ -321,8 +313,8 @@ async fn delete_document_by_filter() {
#[actix_rt::test]
async fn delete_document_by_complex_filter() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
index.update_settings_filterable_attributes(json!(["color"])).await;
let (task, _status_code) = index
.add_documents(
@ -343,22 +335,22 @@ async fn delete_document_by_complex_filter() {
)
.await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]" }), @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 2,
"indexUid": "doggo",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "[date]"
}
"###);
let response = index.wait_task(response.uid()).await;
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
let response = index.wait_task(response.uid()).await.succeeded();
snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "doggo",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@ -402,22 +394,22 @@ async fn delete_document_by_complex_filter() {
.delete_document_by_filter(json!({ "filter": [["color = green", "color NOT EXISTS"]] }))
.await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"taskUid": 3,
"indexUid": "doggo",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "[date]"
}
"###);
let response = index.wait_task(response.uid()).await;
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
let response = index.wait_task(response.uid()).await.succeeded();
snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 3,
"batchUid": 3,
"indexUid": "doggo",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,

View File

@ -621,7 +621,7 @@ async fn delete_document_by_filter() {
let (response, code) =
index.delete_document_by_filter_fail(json!({ "filter": "catto = jorts"})).await;
snapshot!(code, @"202 Accepted");
let response = server.wait_task(response.uid()).await;
let response = server.wait_task(response.uid()).await.failed();
snapshot!(response, @r###"
{
"uid": "[uid]",
@ -665,7 +665,7 @@ async fn fetch_document_by_filter() {
Some("id"),
)
.await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (response, code) = index.fetch_documents(json!(null)).await;
snapshot!(code, @"400 Bad Request");

View File

@ -832,8 +832,8 @@ async fn get_document_by_ids_and_filter() {
#[actix_rt::test]
async fn get_document_with_vectors() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({

View File

@ -6,19 +6,18 @@ use crate::json;
#[actix_rt::test]
async fn error_document_update_create_index_bad_uid() {
let server = Server::new().await;
let index = server.index("883 fj!");
let server = Server::new_shared();
let index = server.unique_index_with_prefix("883 fj!");
let (response, code) = index.update_documents(json!([{"id": 1}]), None).await;
let expected_response = json!({
"message": "`883 fj!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.",
"code": "invalid_index_uid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"
});
assert_eq!(code, 400);
assert_eq!(response, expected_response);
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "`883 fj!-[uuid]` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_), and can not be more than 512 bytes.",
"code": "invalid_index_uid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"
}"###);
}
#[actix_rt::test]

View File

@ -46,8 +46,10 @@ async fn create_index_with_gzip_encoded_request_and_receiving_brotli_encoded_res
let server = Server::new_shared();
let app = server.init_web_app().await;
let index = server.unique_index_with_prefix("test");
let body = serde_json::to_string(&json!({
"uid": "test",
"uid": index.uid.clone(),
"primaryKey": None::<&str>,
}))
.unwrap();
@ -68,7 +70,7 @@ async fn create_index_with_gzip_encoded_request_and_receiving_brotli_encoded_res
let parsed_response =
serde_json::from_slice::<Value>(decoded.into().as_ref()).expect("Expecting valid json");
assert_eq!(parsed_response["indexUid"], "test");
assert_eq!(parsed_response["indexUid"], index.uid);
}
#[actix_rt::test]

View File

@ -28,6 +28,7 @@ async fn error_delete_unexisting_index() {
let (task, code) = index.delete_index_fail().await;
assert_eq!(code, 202);
index.wait_task(task.uid()).await.failed();
let expected_response = json!({
"message": "Index `DOES_NOT_EXISTS` not found.",
@ -57,7 +58,7 @@ async fn loop_delete_add_documents() {
}
for task in tasks {
let response = index.wait_task(task).await;
let response = index.wait_task(task).await.succeeded();
assert_eq!(response["status"], "succeeded", "{}", response);
}
}

View File

@ -52,19 +52,28 @@ async fn no_index_return_empty_list() {
#[actix_rt::test]
async fn list_multiple_indexes() {
let server = Server::new().await;
server.index("test").create(None).await;
let (task, _status_code) = server.index("test1").create(Some("key")).await;
server.index("test").wait_task(task.uid()).await.succeeded();
let (response, code) = server.list_indexes(None, None).await;
let server = Server::new_shared();
let index_without_key = server.unique_index();
let (response_without_key, _status_code) = index_without_key.create(None).await;
let index_with_key = server.unique_index();
let (response_with_key, _status_code) = index_with_key.create(Some("key")).await;
index_without_key.wait_task(response_without_key.uid()).await.succeeded();
index_with_key.wait_task(response_with_key.uid()).await.succeeded();
let (response, code) = server.list_indexes(None, Some(1000)).await;
assert_eq!(code, 200);
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 2);
assert!(arr.iter().any(|entry| entry["uid"] == "test" && entry["primaryKey"] == Value::Null));
assert!(arr.iter().any(|entry| entry["uid"] == "test1" && entry["primaryKey"] == "key"));
assert!(arr.len() >= 2, "Expected at least 2 indexes.");
assert!(arr
.iter()
.any(|entry| entry["uid"] == index_without_key.uid && entry["primaryKey"] == Value::Null));
assert!(arr
.iter()
.any(|entry| entry["uid"] == index_with_key.uid && entry["primaryKey"] == "key"));
}
#[actix_rt::test]

View File

@ -1,10 +1,11 @@
use crate::common::Server;
use crate::common::{shared_does_not_exists_index, Server};
use crate::json;
#[actix_rt::test]
async fn stats() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.create(Some("id")).await;
assert_eq!(code, 202);
@ -15,7 +16,7 @@ async fn stats() {
assert_eq!(code, 200);
assert_eq!(response["numberOfDocuments"], 0);
assert!(response["isIndexing"] == false);
assert_eq!(response["isIndexing"], false);
assert!(response["fieldDistribution"].as_object().unwrap().is_empty());
let documents = json!([
@ -31,7 +32,6 @@ async fn stats() {
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
assert_eq!(response["taskUid"], 1);
index.wait_task(response.uid()).await.succeeded();
@ -39,7 +39,7 @@ async fn stats() {
assert_eq!(code, 200);
assert_eq!(response["numberOfDocuments"], 2);
assert!(response["isIndexing"] == false);
assert_eq!(response["isIndexing"], false);
assert_eq!(response["fieldDistribution"]["id"], 2);
assert_eq!(response["fieldDistribution"]["name"], 1);
assert_eq!(response["fieldDistribution"]["age"], 1);
@ -47,11 +47,11 @@ async fn stats() {
#[actix_rt::test]
async fn error_get_stats_unexisting_index() {
let server = Server::new().await;
let (response, code) = server.index("test").stats().await;
let index = shared_does_not_exists_index().await;
let (response, code) = index.stats().await;
let expected_response = json!({
"message": "Index `test` not found.",
"message": format!("Index `{}` not found.", index.uid),
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"

View File

@ -2,28 +2,26 @@ use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::encoder::Encoder;
use crate::common::Server;
use crate::common::{shared_does_not_exists_index, shared_index_with_documents, Server};
use crate::json;
#[actix_rt::test]
async fn update_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.create(None).await;
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
index.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index.update(Some("primary")).await;
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get().await;
assert_eq!(code, 200);
assert_eq!(response["uid"], "test");
assert_eq!(response["uid"], index.uid);
assert!(response.get("createdAt").is_some());
assert!(response.get("updatedAt").is_some());
@ -39,24 +37,23 @@ async fn update_primary_key() {
#[actix_rt::test]
async fn create_and_update_with_different_encoding() {
let server = Server::new().await;
let index = server.index_with_encoder("test", Encoder::Gzip);
let (_, code) = index.create(None).await;
let server = Server::new_shared();
let index = server.unique_index_with_encoder(Encoder::Gzip);
let (create_task, code) = index.create(None).await;
assert_eq!(code, 202);
index.wait_task(create_task.uid()).await.succeeded();
let index = server.index_with_encoder("test", Encoder::Brotli);
let index = index.with_encoder(Encoder::Brotli);
let (task, _status_code) = index.update(Some("primary")).await;
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task.uid()).await.succeeded();
}
#[actix_rt::test]
async fn update_nothing() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task1, code) = index.create(None).await;
assert_eq!(code, 202);
@ -67,35 +64,20 @@ async fn update_nothing() {
assert_eq!(code, 202);
let response = index.wait_task(task2.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task2.uid()).await.succeeded();
}
#[actix_rt::test]
async fn error_update_existing_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.create(Some("id")).await;
let index = shared_index_with_documents().await;
let (update_task, code) = index.update_index_fail(Some("primary")).await;
assert_eq!(code, 202);
let documents = json!([
{
"id": "11",
"content": "foobar"
}
]);
index.add_documents(documents, None).await;
let (task, code) = index.update(Some("primary")).await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
let response = index.wait_task(update_task.uid()).await.failed();
let expected_response = json!({
"message": "Index `test`: Index already has a primary key: `id`.",
"message": format!("Index `{}`: Index already has a primary key: `id`.", index.uid),
"code": "index_primary_key_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_primary_key_already_exists"
@ -106,15 +88,15 @@ async fn error_update_existing_primary_key() {
#[actix_rt::test]
async fn error_update_unexisting_index() {
let server = Server::new().await;
let (task, code) = server.index("test").update(None).await;
let index = shared_does_not_exists_index().await;
let (task, code) = index.update_index_fail(Some("my-primary-key")).await;
assert_eq!(code, 202);
let response = server.index("test").wait_task(task.uid()).await;
let response = index.wait_task(task.uid()).await.failed();
let expected_response = json!({
"message": "Index `test` not found.",
"message": format!("Index `{}` not found.", index.uid),
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"

View File

@ -146,8 +146,8 @@ static DOCUMENT_DISTINCT_KEY: &str = "product_id";
/// testing: https://github.com/meilisearch/meilisearch/issues/4078
#[actix_rt::test]
async fn distinct_search_with_offset_no_ranking() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.add_documents(documents, Some(DOCUMENT_PRIMARY_KEY)).await;
@ -163,50 +163,50 @@ async fn distinct_search_with_offset_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["123456", "789012"]"#);
snapshot!(format!("{hits:?}"), @r#"["123456", "789012"]"#);
snapshot!(response["estimatedTotalHits"] , @"11");
let (response, code) = index.search_post(json!({"offset": 2, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["456789", "987654"]"#);
snapshot!(format!("{hits:?}"), @r#"["456789", "987654"]"#);
snapshot!(response["estimatedTotalHits"], @"10");
let (response, code) = index.search_post(json!({"offset": 4, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["234567", "345678"]"#);
snapshot!(format!("{hits:?}"), @r#"["234567", "345678"]"#);
snapshot!(response["estimatedTotalHits"], @"6");
let (response, code) = index.search_post(json!({"offset": 5, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"1");
snapshot!(format!("{:?}", hits), @r#"["345678"]"#);
snapshot!(format!("{hits:?}"), @r#"["345678"]"#);
snapshot!(response["estimatedTotalHits"], @"6");
let (response, code) = index.search_post(json!({"offset": 6, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"0");
snapshot!(format!("{:?}", hits), @r#"[]"#);
snapshot!(format!("{hits:?}"), @r#"[]"#);
snapshot!(response["estimatedTotalHits"], @"6");
let (response, code) = index.search_post(json!({"offset": 7, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"0");
snapshot!(format!("{:?}", hits), @r#"[]"#);
snapshot!(format!("{hits:?}"), @r#"[]"#);
snapshot!(response["estimatedTotalHits"], @"6");
}
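// Note: `format!("{hits:?}")` uses Rust 2021 inline format args and is equivalent
// to the `format!("{:?}", hits)` form it replaces throughout these hunks.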
/// testing: https://github.com/meilisearch/meilisearch/issues/4130
#[actix_rt::test]
async fn distinct_search_with_pagination_no_ranking() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.add_documents(documents, Some(DOCUMENT_PRIMARY_KEY)).await;
@ -222,7 +222,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"0");
snapshot!(format!("{:?}", hits), @r#"[]"#);
snapshot!(format!("{hits:?}"), @r#"[]"#);
snapshot!(response["page"], @"0");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@ -231,7 +231,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["123456", "789012"]"#);
snapshot!(format!("{hits:?}"), @r#"["123456", "789012"]"#);
snapshot!(response["page"], @"1");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@ -240,7 +240,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["456789", "987654"]"#);
snapshot!(format!("{hits:?}"), @r#"["456789", "987654"]"#);
snapshot!(response["page"], @"2");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@ -249,7 +249,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["234567", "345678"]"#);
snapshot!(format!("{hits:?}"), @r#"["234567", "345678"]"#);
snapshot!(response["page"], @"3");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@ -258,7 +258,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"0");
snapshot!(format!("{:?}", hits), @r#"[]"#);
snapshot!(format!("{hits:?}"), @r#"[]"#);
snapshot!(response["page"], @"4");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@ -267,7 +267,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"3");
snapshot!(format!("{:?}", hits), @r#"["987654", "234567", "345678"]"#);
snapshot!(format!("{hits:?}"), @r#"["987654", "234567", "345678"]"#);
snapshot!(response["page"], @"2");
snapshot!(response["totalPages"], @"2");
snapshot!(response["totalHits"], @"6");
@ -275,13 +275,13 @@ async fn distinct_search_with_pagination_no_ranking() {
#[actix_rt::test]
async fn distinct_at_search_time() {
let server = Server::new().await;
let index = server.index("tamo");
let server = Server::new_shared();
let index = server.unique_index();
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, Some(DOCUMENT_PRIMARY_KEY)).await;
let (task, _) = index.update_settings_filterable_attributes(json!(["color.main"])).await;
let task = index.wait_task(task.uid()).await;
let task = index.wait_task(task.uid()).await.succeeded();
snapshot!(task, name: "succeed");
fn get_hits(response: &Value) -> Vec<String> {
@ -299,7 +299,7 @@ async fn distinct_at_search_time() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"3");
snapshot!(format!("{:?}", hits), @r###"["1", "2", "3"]"###);
snapshot!(format!("{hits:?}"), @r###"["1", "2", "3"]"###);
snapshot!(response["page"], @"1");
snapshot!(response["totalPages"], @"1");
snapshot!(response["totalHits"], @"3");

View File

@ -707,7 +707,7 @@ async fn filter_invalid_attribute_array() {
|response, code| {
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `many` is not filterable. Available filterable attribute patterns are: `title`.\n1:5 many = Glass",
"message": "Index `[uuid]`: Attribute `many` is not filterable. Available filterable attribute patterns are: `title`.\n1:5 many = Glass",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -728,7 +728,7 @@ async fn filter_invalid_attribute_string() {
|response, code| {
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `many` is not filterable. Available filterable attribute patterns are: `title`.\n1:5 many = Glass",
"message": "Index `[uuid]`: Attribute `many` is not filterable. Available filterable attribute patterns are: `title`.\n1:5 many = Glass",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -885,7 +885,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -911,7 +911,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -932,7 +932,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -958,7 +958,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -984,7 +984,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `TO` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `TO` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -1143,7 +1143,7 @@ async fn search_on_unknown_field() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"message": "Index `[uuid]`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"code": "invalid_search_attributes_to_search_on",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_attributes_to_search_on"
@ -1164,7 +1164,7 @@ async fn search_on_unknown_field_plus_joker() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"message": "Index `[uuid]`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"code": "invalid_search_attributes_to_search_on",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_attributes_to_search_on"
@ -1182,7 +1182,7 @@ async fn search_on_unknown_field_plus_joker() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"message": "Index `[uuid]`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"code": "invalid_search_attributes_to_search_on",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_attributes_to_search_on"
@ -1195,10 +1195,8 @@ async fn search_on_unknown_field_plus_joker() {
#[actix_rt::test]
async fn distinct_at_search_time() {
let server = Server::new().await;
let index = server.index("test");
let (task, _) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let server = Server::new_shared();
let index = server.unique_index();
let (response, _code) =
index.add_documents(json!([{"id": 1, "color": "Doggo", "machin": "Action"}]), None).await;
index.wait_task(response.uid()).await.succeeded();
@ -1208,7 +1206,7 @@ async fn distinct_at_search_time() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. This index does not have configured filterable attributes.",
"message": "Index `[uuid]`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. This index does not have configured filterable attributes.",
"code": "invalid_search_distinct",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_distinct"
@ -1223,7 +1221,7 @@ async fn distinct_at_search_time() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. Available filterable attributes patterns are: `color, machin`.",
"message": "Index `[uuid]`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. Available filterable attributes patterns are: `color, machin`.",
"code": "invalid_search_distinct",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_distinct"
@ -1238,7 +1236,7 @@ async fn distinct_at_search_time() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. Available filterable attributes patterns are: `color, <..hidden-attributes>`.",
"message": "Index `[uuid]`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. Available filterable attributes patterns are: `color, <..hidden-attributes>`.",
"code": "invalid_search_distinct",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_distinct"

View File

@ -50,13 +50,11 @@ async fn test_settings_documents_indexing_swapping_and_facet_search(
let (task, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.update_settings(settings.clone()).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.facet_search(query.clone()).await;
insta::allow_duplicates! {
@ -65,21 +63,18 @@ async fn test_settings_documents_indexing_swapping_and_facet_search(
let (task, code) = server.delete_index("test").await;
assert_eq!(code, 202, "{}", task);
let response = server.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
server.wait_task(task.uid()).await.succeeded();
eprintln!("Settings -> Documents -> test");
let index = server.index("test");
let (task, code) = index.update_settings(settings.clone()).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.facet_search(query.clone()).await;
insta::allow_duplicates! {
@ -88,14 +83,13 @@ async fn test_settings_documents_indexing_swapping_and_facet_search(
let (task, code) = server.delete_index("test").await;
assert_eq!(code, 202, "{}", task);
let response = server.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
server.wait_task(task.uid()).await.succeeded();
}
#[actix_rt::test]
async fn simple_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["genres"])).await;
@ -105,20 +99,20 @@ async fn simple_facet_search() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 2);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 2);
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "adventure"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 1);
}
#[actix_rt::test]
async fn simple_facet_search_on_movies() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = json!([
{
@ -212,23 +206,23 @@ async fn simple_facet_search_on_movies() {
]);
let (response, code) =
index.update_settings_filterable_attributes(json!(["genres", "color"])).await;
assert_eq!(202, code, "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetQuery": "", "facetName": "genres", "q": "" })).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(response["facetHits"], @r###"[{"value":"Action","count":2},{"value":"Adventure","count":3},{"value":"Drama","count":3},{"value":"Fantasy","count":1},{"value":"Romance","count":1},{"value":"Science Fiction","count":1}]"###);
}
#[actix_rt::test]
async fn advanced_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["genres"])).await;
@ -251,8 +245,8 @@ async fn advanced_facet_search() {
#[actix_rt::test]
async fn more_advanced_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["genres"])).await;
@ -275,8 +269,8 @@ async fn more_advanced_facet_search() {
#[actix_rt::test]
async fn simple_facet_search_with_max_values() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_faceting(json!({ "maxValuesPerFacet": 1 })).await;
@ -287,14 +281,14 @@ async fn simple_facet_search_with_max_values() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 1);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 1);
}
#[actix_rt::test]
async fn simple_facet_search_by_count_with_max_values() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index
@ -309,14 +303,14 @@ async fn simple_facet_search_by_count_with_max_values() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 1);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 1);
}
#[actix_rt::test]
async fn non_filterable_facet_search_error() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _status_code) = index.add_documents(documents, None).await;
@ -324,17 +318,17 @@ async fn non_filterable_facet_search_error() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "adv"})).await;
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
}
#[actix_rt::test]
async fn facet_search_dont_support_words() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["genres"])).await;
@ -344,14 +338,14 @@ async fn facet_search_dont_support_words() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "words"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 0);
}
#[actix_rt::test]
async fn simple_facet_search_with_sort_by_count() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_faceting(json!({ "sortFacetValuesBy": { "*": "count" } })).await;
@ -362,7 +356,7 @@ async fn simple_facet_search_with_sort_by_count() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let hits = response["facetHits"].as_array().unwrap();
assert_eq!(hits.len(), 2);
assert_eq!(hits[0], json!({ "value": "Action", "count": 3 }));
@ -371,25 +365,25 @@ async fn simple_facet_search_with_sort_by_count() {
#[actix_rt::test]
async fn add_documents_and_deactivate_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index
.update_settings(json!({
"facetSearch": false,
"filterableAttributes": ["genres"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
snapshot!(response, @r###"
{
"message": "The facet search is disabled for this index",
@ -402,8 +396,8 @@ async fn add_documents_and_deactivate_facet_search() {
#[actix_rt::test]
async fn deactivate_facet_search_and_add_documents() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -411,16 +405,16 @@ async fn deactivate_facet_search_and_add_documents() {
"filterableAttributes": ["genres"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
snapshot!(response, @r###"
{
"message": "The facet search is disabled for this index",
@ -433,8 +427,8 @@ async fn deactivate_facet_search_and_add_documents() {
#[actix_rt::test]
async fn deactivate_facet_search_add_documents_and_activate_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -442,31 +436,31 @@ async fn deactivate_facet_search_add_documents_and_activate_facet_search() {
"filterableAttributes": ["genres"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index
.update_settings(json!({
"facetSearch": true,
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 2);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
async fn deactivate_facet_search_add_documents_and_reset_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -474,25 +468,25 @@ async fn deactivate_facet_search_add_documents_and_reset_facet_search() {
"filterableAttributes": ["genres"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index
.update_settings(json!({
"facetSearch": serde_json::Value::Null,
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 2);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
@ -618,8 +612,8 @@ async fn facet_search_with_filterable_attributes_rules_errors() {
#[actix_rt::test]
async fn distinct_facet_search_on_movies() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = json!([
{
@ -925,26 +919,26 @@ async fn distinct_facet_search_on_movies() {
]);
let (response, code) =
index.update_settings_filterable_attributes(json!(["genres", "color"])).await;
assert_eq!(202, code, "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.update_settings_distinct_attribute(json!("color")).await;
assert_eq!(202, code, "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetQuery": "blob", "facetName": "genres", "q": "" })).await;
// the non-exhaustive facet count reports 27 documents for the facet query "blob", even though only 23 remain after deduplicating on the distinct attribute (color).
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(response["facetHits"], @r###"[{"value":"Blob","count":27}]"###);
let (response, code) =
index.facet_search(json!({"facetQuery": "blob", "facetName": "genres", "q": "", "exhaustiveFacetCount": true })).await;
// the exhaustive facet count reports 23 documents for the facet query "blob", which is the number of distinct colors.
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(response["facetHits"], @r###"[{"value":"Blob","count":23}]"###);
}
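As a conceptual aside (this is not Meilisearch internals): the two counts differ because the non-exhaustive path tallies every matching document, while the exhaustive path deduplicates by the distinct attribute first. A self-contained sketch:

    use std::collections::HashSet;

    // Returns (non_exhaustive, exhaustive) counts for the documents matching a
    // facet value, given each matching document's distinct-attribute value.
    fn facet_counts(colors_of_matching_docs: &[&str]) -> (usize, usize) {
        let non_exhaustive = colors_of_matching_docs.len();
        let exhaustive = colors_of_matching_docs.iter().copied().collect::<HashSet<_>>().len();
        (non_exhaustive, exhaustive)
    }

    fn main() {
        // Three matches, but only two distinct colors.
        assert_eq!(facet_counts(&["red", "red", "blue"]), (3, 2));
    }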

View File

@ -4,23 +4,14 @@ use tempfile::TempDir;
use super::test_settings_documents_indexing_swapping_and_search;
use crate::common::{
default_settings, shared_index_with_documents, Server, DOCUMENTS, NESTED_DOCUMENTS,
default_settings, shared_index_with_documents, shared_index_with_nested_documents, Server,
DOCUMENTS, NESTED_DOCUMENTS,
};
use crate::json;
#[actix_rt::test]
async fn search_with_filter_string_notation() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.update_settings(json!({"filterableAttributes": ["title"]})).await;
meili_snap::snapshot!(code, @"202 Accepted");
let documents = DOCUMENTS.clone();
let (task, code) = index.add_documents(documents, None).await;
meili_snap::snapshot!(code, @"202 Accepted");
let res = index.wait_task(task.uid()).await;
meili_snap::snapshot!(res["status"], @r###""succeeded""###);
let index = shared_index_with_documents().await;
index
.search(
@ -28,44 +19,34 @@ async fn search_with_filter_string_notation() {
"filter": "title = Gläss"
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
},
)
.await;
let index = server.index("nested");
let nested_index = shared_index_with_nested_documents().await;
let (_, code) =
index.update_settings(json!({"filterableAttributes": ["cattos", "doggos.age"]})).await;
meili_snap::snapshot!(code, @"202 Accepted");
let documents = NESTED_DOCUMENTS.clone();
let (task, code) = index.add_documents(documents, None).await;
meili_snap::snapshot!(code, @"202 Accepted");
let res = index.wait_task(task.uid()).await;
meili_snap::snapshot!(res["status"], @r###""succeeded""###);
index
nested_index
.search(
json!({
"filter": "cattos = pésti"
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
assert_eq!(response["hits"][0]["id"], json!(852));
},
)
.await;
index
nested_index
.search(
json!({
"filter": "doggos.age > 5"
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 2);
assert_eq!(response["hits"][0]["id"], json!(654));
assert_eq!(response["hits"][1]["id"], json!(951));
@ -82,7 +63,7 @@ async fn search_with_filter_array_notation() {
"filter": ["title = Gläss"]
}))
.await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
let (response, code) = index
@ -90,7 +71,7 @@ async fn search_with_filter_array_notation() {
"filter": [["title = Gläss", "title = \"Shazam!\"", "title = \"Escape Room\""]]
}))
.await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 3);
}
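For reference, the string and array notations express the same filter language: the outer array is an AND of clauses and an inner array is an OR group, so the request above matches any of the three titles. A sketch of the equivalence, with values taken from the test:

    use serde_json::json;

    fn main() {
        // Outer array = AND, inner array = OR.
        let array_form = json!({
            "filter": [["title = Gläss", "title = \"Shazam!\"", "title = \"Escape Room\""]]
        });
        let string_form = json!({
            "filter": "title = Gläss OR title = \"Shazam!\" OR title = \"Escape Room\""
        });
        println!("{array_form}\n{string_form}");
    }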
@ -116,7 +97,7 @@ async fn search_with_contains_filter() {
"filter": "title CONTAINS cap"
}))
.await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 2);
}
@ -269,16 +250,14 @@ async fn search_with_pattern_filter_settings() {
#[actix_rt::test]
async fn search_with_pattern_filter_settings_scenario_1() {
let temp = TempDir::new().unwrap();
let server = Server::new_with_options(Opt { ..default_settings(temp.path()) }).await.unwrap();
let server = Server::new_shared();
eprintln!("Documents -> Settings -> test");
let index = server.index("test");
let index = server.unique_index();
let (task, code) = index.add_documents(NESTED_DOCUMENTS.clone(), None).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
snapshot!(response["status"], @r###""succeeded""###);
assert_eq!(code, 202, "{task}");
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index
.update_settings(json!({"filterableAttributes": [{
@ -289,9 +268,8 @@ async fn search_with_pattern_filter_settings_scenario_1() {
}
}]}))
.await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
snapshot!(response["status"], @r###""succeeded""###);
assert_eq!(code, 202, "{task}");
index.wait_task(task.uid()).await.succeeded();
// Check if the Equality filter works
index
@ -335,7 +313,7 @@ async fn search_with_pattern_filter_settings_scenario_1() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -355,9 +333,8 @@ async fn search_with_pattern_filter_settings_scenario_1() {
}
}]}))
.await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
snapshot!(response["status"], @r###""succeeded""###);
assert_eq!(code, 202, "{task}");
index.wait_task(task.uid()).await.succeeded();
// Check if the Equality filter works
index
@ -467,9 +444,8 @@ async fn search_with_pattern_filter_settings_scenario_1() {
}
}]}))
.await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
snapshot!(response["status"], @r###""succeeded""###);
assert_eq!(code, 202, "{task}");
index.wait_task(task.uid()).await.succeeded();
// Check if the Equality filter returns an error
index
@ -481,7 +457,7 @@ async fn search_with_pattern_filter_settings_scenario_1() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -567,9 +543,8 @@ async fn search_with_pattern_filter_settings_scenario_1() {
}
}]}))
.await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
snapshot!(response["status"], @r###""succeeded""###);
assert_eq!(code, 202, "{task}");
index.wait_task(task.uid()).await.succeeded();
// Check if the Equality filter works
index
@ -613,7 +588,7 @@ async fn search_with_pattern_filter_settings_scenario_1() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -720,7 +695,7 @@ async fn test_filterable_attributes_priority() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Index `test`: Attribute `doggos.age` is not filterable. Available filterable attribute patterns are: `doggos.*`.\n1:11 doggos.age > 2",
"message": "Index `[uuid]`: Attribute `doggos.age` is not filterable. Available filterable attribute patterns are: `doggos.*`.\n1:11 doggos.age > 2",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -746,7 +721,7 @@ async fn test_filterable_attributes_priority() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Index `test`: Attribute `doggos` is not filterable. Available filterable attribute patterns are: `doggos.*`.\n1:7 doggos EXISTS",
"message": "Index `[uuid]`: Attribute `doggos` is not filterable. Available filterable attribute patterns are: `doggos.*`.\n1:7 doggos EXISTS",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"

View File

@ -1,55 +1,13 @@
use meili_snap::{json_string, snapshot};
use meilisearch_types::milli::constants::RESERVED_GEO_FIELD_NAME;
use once_cell::sync::Lazy;
use super::test_settings_documents_indexing_swapping_and_search;
use crate::common::{Server, Value};
use crate::common::shared_index_with_geo_documents;
use crate::json;
static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
"id": 1,
"name": "Taco Truck",
"address": "444 Salsa Street, Burritoville",
"type": "Mexican",
"rating": 9,
"_geo": {
"lat": 34.0522,
"lng": -118.2437
}
},
{
"id": 2,
"name": "La Bella Italia",
"address": "456 Elm Street, Townsville",
"type": "Italian",
"rating": 9,
"_geo": {
"lat": "45.4777599",
"lng": "9.1967508"
}
},
{
"id": 3,
"name": "Crêpe Truck",
"address": "2 Billig Avenue, Rouenville",
"type": "French",
"rating": 10
}
])
});
#[actix_rt::test]
async fn geo_sort_with_geo_strings() {
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["_geo"])).await;
index.update_settings_sortable_attributes(json!(["_geo"])).await;
let (task, _status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded();
let index = shared_index_with_geo_documents().await;
index
.search(
@ -58,7 +16,7 @@ async fn geo_sort_with_geo_strings() {
"sort": ["_geoPoint(0.0, 0.0):asc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
},
)
.await;
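shared_index_with_geo_documents() presumably seeds the same three restaurants the removed DOCUMENTS static defined; the point of the fixture is that one document stores _geo coordinates as numbers, one as strings, and one has no _geo at all:

    fn main() {
        // Shape of the geo fixture (copied from the removed static above).
        let geo_docs = serde_json::json!([
            { "id": 1, "_geo": { "lat": 34.0522, "lng": -118.2437 } },        // numbers
            { "id": 2, "_geo": { "lat": "45.4777599", "lng": "9.1967508" } }, // strings
            { "id": 3 }                                                       // no _geo
        ]);
        println!("{geo_docs}");
    }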
@ -66,14 +24,7 @@ async fn geo_sort_with_geo_strings() {
#[actix_rt::test]
async fn geo_bounding_box_with_string_and_number() {
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["_geo"])).await;
index.update_settings_sortable_attributes(json!(["_geo"])).await;
let (ret, _code) = index.add_documents(documents, None).await;
index.wait_task(ret.uid()).await.succeeded();
let index = shared_index_with_geo_documents().await;
index
.search(
@ -81,7 +32,7 @@ async fn geo_bounding_box_with_string_and_number() {
"filter": "_geoBoundingBox([89, 179], [-89, -179])",
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@ -123,14 +74,7 @@ async fn geo_bounding_box_with_string_and_number() {
#[actix_rt::test]
async fn bug_4640() {
// https://github.com/meilisearch/meilisearch/issues/4640
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.update_settings_filterable_attributes(json!(["_geo"])).await;
let (ret, _code) = index.update_settings_sortable_attributes(json!(["_geo"])).await;
index.wait_task(ret.uid()).await.succeeded();
let index = shared_index_with_geo_documents().await;
// Sorting on _geoPoint should return the second document first
index
@ -139,7 +83,7 @@ async fn bug_4640() {
"sort": ["_geoPoint(45.4777599, 9.1967508):asc"],
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@ -202,7 +146,7 @@ async fn geo_asc_with_words() {
&json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "geo:asc"]}),
&json!({"q": "jean"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@ -247,7 +191,7 @@ async fn geo_asc_with_words() {
&json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "geo:asc"]}),
&json!({"q": "bob"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@ -284,7 +228,7 @@ async fn geo_asc_with_words() {
&json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "geo:asc"]}),
&json!({"q": "intel"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@ -324,7 +268,7 @@ async fn geo_sort_with_words() {
&json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "sort"], "sortableAttributes": [RESERVED_GEO_FIELD_NAME]}),
&json!({"q": "jean", "sort": ["_geoPoint(0.0, 0.0):asc"]}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [

View File

@ -2,31 +2,31 @@ use meili_snap::snapshot;
use once_cell::sync::Lazy;
use crate::common::index::Index;
use crate::common::{Server, Value};
use crate::common::{Server, Shared, Value};
use crate::json;
async fn index_with_documents_user_provided<'a>(
server: &'a Server,
server: &'a Server<Shared>,
documents: &Value,
) -> Index<'a> {
let index = server.index("test");
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({ "embedders": {"default": {
"source": "userProvided",
"dimensions": 2}}} ))
.await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
index
}
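With source set to "userProvided", Meilisearch performs no embedding itself, so every document must carry its own vector under _vectors.<embedder>, matching the configured dimensions. A minimal document for the helper above (field values illustrative):

    fn main() {
        // Two-dimensional vector for the `default` embedder configured above.
        let doc = serde_json::json!({
            "id": 42,
            "title": "Example",
            "_vectors": { "default": [0.5, -0.5] }
        });
        println!("{doc}");
    }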
async fn index_with_documents_hf<'a>(server: &'a Server, documents: &Value) -> Index<'a> {
let index = server.index("test");
async fn index_with_documents_hf<'a>(server: &'a Server<Shared>, documents: &Value) -> Index<'a> {
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({ "embedders": {"default": {
@ -36,11 +36,11 @@ async fn index_with_documents_hf<'a>(server: &'a Server, documents: &Value) -> I
"documentTemplate": "{{doc.title}}, {{doc.desc}}"
}}} ))
.await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
index
}
@ -76,6 +76,48 @@ static SINGLE_DOCUMENT_VEC: Lazy<Value> = Lazy::new(|| {
}])
});
static TEST_DISTINCT_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
// for query "Captain Marvel" and vector [1.0, 1.0]
json!([
{
"id": 0,
"search": "Captain Planet",
"desc": "#2 for keyword search, #3 for hybrid search",
"_vectors": {
"default": [-1.0, 0.0],
},
"distinct": 0
},
{
"id": 1,
"search": "Captain Marvel",
"desc": "#1 for keyword search, #4 for hybrid search",
"_vectors": {
"default": [-1.0, -1.0],
},
"distinct": 1
},
{
"id": 2,
"search": "Some Captain at least",
"desc": "#3 for keyword search, #1 for hybrid search",
"_vectors": {
"default": [1.0, 1.0],
},
"distinct": 0
},
{
"id": 3,
"search": "Irrelevant Capitaine",
"desc": "#4 for keyword search, #2 for hybrid search",
"_vectors": {
"default": [1.0, 0.0],
},
"distinct": 1
},
])
});
static SIMPLE_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
@ -97,8 +139,8 @@ static SIMPLE_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn simple_search() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(
@ -130,8 +172,8 @@ async fn simple_search() {
#[actix_rt::test]
async fn limit_offset() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(
@ -143,8 +185,8 @@ async fn limit_offset() {
snapshot!(response["semanticHitCount"], @"0");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(
@ -159,8 +201,8 @@ async fn limit_offset() {
#[actix_rt::test]
async fn simple_search_hf() {
let server = Server::new().await;
let index = index_with_documents_hf(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents_hf(server, &SIMPLE_SEARCH_DOCUMENTS).await;
let (response, code) = index
.search_post(
@ -211,8 +253,8 @@ async fn simple_search_hf() {
#[actix_rt::test]
async fn distribution_shift() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let search = json!({"q": "Captain", "vector": [1.0, 1.0], "showRankingScore": true, "hybrid": {"embedder": "default", "semanticRatio": 1.0}, "retrieveVectors": true});
let (response, code) = index.search_post(search.clone()).await;
@ -233,7 +275,7 @@ async fn distribution_shift() {
.await;
snapshot!(code, @"202 Accepted");
let response = server.wait_task(response.uid()).await;
let response = server.wait_task(response.uid()).await.succeeded();
snapshot!(response["details"], @r#"{"embedders":{"default":{"distribution":{"mean":0.998,"sigma":0.01}}}}"#);
let (response, code) = index.search_post(search).await;
@ -243,8 +285,8 @@ async fn distribution_shift() {
#[actix_rt::test]
async fn highlighter() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(json!({"q": "Captain Marvel", "vector": [1.0, 1.0],
@ -298,8 +340,8 @@ async fn highlighter() {
#[actix_rt::test]
async fn invalid_semantic_ratio() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(
@ -370,8 +412,8 @@ async fn invalid_semantic_ratio() {
#[actix_rt::test]
async fn single_document() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SINGLE_DOCUMENT_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SINGLE_DOCUMENT_VEC).await;
let (response, code) = index
.search_post(
@ -386,8 +428,8 @@ async fn single_document() {
#[actix_rt::test]
async fn query_combination() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
// search without query and vector, but with hybrid => still placeholder
let (response, code) = index
@ -493,10 +535,54 @@ async fn query_combination() {
snapshot!(response["semanticHitCount"], @"0");
}
// see <https://github.com/meilisearch/meilisearch/issues/5526>
#[actix_rt::test]
async fn distinct_is_applied() {
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &TEST_DISTINCT_DOCUMENTS).await;
let (response, code) = index.update_settings(json!({ "distinctAttribute": "distinct" } )).await;
assert_eq!(202, code, "{:?}", response);
index.wait_task(response.uid()).await.succeeded();
// pure keyword
let (response, code) = index
.search_post(
json!({"q": "Captain Marvel", "vector": [1.0, 1.0], "hybrid": {"semanticRatio": 0.0, "embedder": "default"}}),
)
.await;
snapshot!(code, @"200 OK");
snapshot!(response["hits"], @r###"[{"id":1,"search":"Captain Marvel","desc":"#1 for keyword search, #4 for hybrid search","distinct":1},{"id":0,"search":"Captain Planet","desc":"#2 for keyword search, #3 for hybrid search","distinct":0}]"###);
snapshot!(response["semanticHitCount"], @"null");
snapshot!(response["estimatedTotalHits"], @"2");
// pure semantic
let (response, code) = index
.search_post(
json!({"q": "Captain Marvel", "vector": [1.0, 1.0], "hybrid": {"semanticRatio": 1.0, "embedder": "default"}}),
)
.await;
snapshot!(code, @"200 OK");
snapshot!(response["hits"], @r###"[{"id":2,"search":"Some Captain at least","desc":"#3 for keyword search, #1 for hybrid search","distinct":0},{"id":3,"search":"Irrelevant Capitaine","desc":"#4 for keyword search, #2 for hybrid search","distinct":1}]"###);
snapshot!(response["semanticHitCount"], @"2");
snapshot!(response["estimatedTotalHits"], @"2");
// hybrid
let (response, code) = index
.search_post(
json!({"q": "Captain Marvel", "vector": [1.0, 1.0], "hybrid": {"semanticRatio": 0.5, "embedder": "default"}}),
)
.await;
snapshot!(code, @"200 OK");
snapshot!(response["hits"], @r###"[{"id":2,"search":"Some Captain at least","desc":"#3 for keyword search, #1 for hybrid search","distinct":0},{"id":1,"search":"Captain Marvel","desc":"#1 for keyword search, #4 for hybrid search","distinct":1}]"###);
snapshot!(response["semanticHitCount"], @"1");
snapshot!(response["estimatedTotalHits"], @"2");
}
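The three snapshots are consistent with a simple reading of semanticRatio (a sketch of the assumed semantics, not the actual merge code): out of a result window of n hits, roughly ratio * n come from the semantic ranking and the rest from the keyword ranking, with the distinct attribute deduplicating the merged list.

    // Self-contained sketch matching the semanticHitCount values above.
    fn semantic_hit_count(window: usize, semantic_ratio: f32) -> usize {
        (window as f32 * semantic_ratio).round() as usize
    }

    fn main() {
        assert_eq!(semantic_hit_count(2, 0.0), 0); // pure keyword
        assert_eq!(semantic_hit_count(2, 1.0), 2); // pure semantic
        assert_eq!(semantic_hit_count(2, 0.5), 1); // hybrid
    }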
#[actix_rt::test]
async fn retrieve_vectors() {
let server = Server::new().await;
let index = index_with_documents_hf(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents_hf(server, &SIMPLE_SEARCH_DOCUMENTS).await;
let (response, code) = index
.search_post(
@ -546,7 +632,7 @@ async fn retrieve_vectors() {
let (response, code) = index
.update_settings(json!({ "displayedAttributes": ["id", "title", "desc", "_vectors"]} ))
.await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index
@ -596,7 +682,7 @@ async fn retrieve_vectors() {
// remove `_vectors` from displayed attributes
let (response, code) =
index.update_settings(json!({ "displayedAttributes": ["id", "title", "desc"]} )).await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index

View File

@ -89,9 +89,9 @@ static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn simple_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
index
.update_settings(
@ -147,23 +147,20 @@ async fn simple_search() {
.search(
json!({"q": "進撃", "locales": ["jpn"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(response, @r###"
snapshot!(response, @r#"
{
"hits": [
{
"id": 852
},
{
"id": 853
}
],
"query": "進撃",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 1
}
"###);
"#);
snapshot!(code, @"200 OK");
},
)
@ -172,23 +169,20 @@ async fn simple_search() {
// chinese
index
.search(json!({"q": "进击", "attributesToRetrieve": ["id"]}), |response, code| {
snapshot!(response, @r###"
snapshot!(response, @r#"
{
"hits": [
{
"id": 853
},
{
"id": 852
}
],
"query": "进击",
"processingTimeMs": "[duration]",
"limit": 20,
"offset": 0,
"estimatedTotalHits": 2
"estimatedTotalHits": 1
}
"###);
"#);
snapshot!(code, @"200 OK");
})
.await;
@ -196,9 +190,9 @@ async fn simple_search() {
#[actix_rt::test]
async fn force_locales() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@ -211,10 +205,10 @@ async fn force_locales() {
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -274,9 +268,9 @@ async fn force_locales() {
#[actix_rt::test]
async fn force_locales_with_pattern() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@ -289,10 +283,10 @@ async fn force_locales_with_pattern() {
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -352,9 +346,9 @@ async fn force_locales_with_pattern() {
#[actix_rt::test]
async fn force_locales_with_pattern_nested() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = NESTED_DOCUMENTS.clone();
let (response, _) = index
.update_settings(json!({
@ -365,10 +359,10 @@ async fn force_locales_with_pattern_nested() {
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -423,9 +417,9 @@ async fn force_locales_with_pattern_nested() {
}
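The settings bodies are elided by the diff, but the shape of a localizedAttributes rule can be sketched from the attribute names and locale codes used in this file (locale codes are ISO-639-3; the exact values here are illustrative):

    fn main() {
        // Illustrative rule: force Japanese on the Japanese-named attribute.
        let settings = serde_json::json!({
            "localizedAttributes": [
                { "attributePatterns": ["name_ja"], "locales": ["jpn"] }
            ]
        });
        println!("{settings}");
    }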
#[actix_rt::test]
async fn force_different_locales_with_pattern() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@ -440,10 +434,10 @@ async fn force_different_locales_with_pattern() {
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -499,9 +493,9 @@ async fn force_different_locales_with_pattern() {
#[actix_rt::test]
async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@ -518,10 +512,10 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -577,9 +571,9 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
#[actix_rt::test]
async fn auto_infer_locales_at_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@ -592,10 +586,10 @@ async fn auto_infer_locales_at_search() {
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -676,9 +670,9 @@ async fn auto_infer_locales_at_search() {
#[actix_rt::test]
async fn force_different_locales_with_pattern_nested() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = NESTED_DOCUMENTS.clone();
let (response, _) = index
.update_settings(json!({
@ -691,10 +685,10 @@ async fn force_different_locales_with_pattern_nested() {
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -774,9 +768,9 @@ async fn force_different_locales_with_pattern_nested() {
#[actix_rt::test]
async fn settings_change() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = NESTED_DOCUMENTS.clone();
let (task, _status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded();
@ -789,10 +783,10 @@ async fn settings_change() {
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 1,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -852,10 +846,10 @@ async fn settings_change() {
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 2,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -906,9 +900,9 @@ async fn settings_change() {
#[actix_rt::test]
async fn invalid_locales() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
index
.update_settings(
@ -945,9 +939,9 @@ async fn invalid_locales() {
#[actix_rt::test]
async fn invalid_localized_attributes_rules() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let (response, _) = index
.update_settings(json!({
"localizedAttributes": [
@ -1015,19 +1009,19 @@ async fn invalid_localized_attributes_rules() {
#[actix_rt::test]
async fn simple_facet_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(json!({
"filterableAttributes": ["name_en", "name_ja", "name_zh"],
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -1073,9 +1067,9 @@ async fn simple_facet_search() {
#[actix_rt::test]
async fn facet_search_with_localized_attributes() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(json!({
@ -1086,10 +1080,10 @@ async fn facet_search_with_localized_attributes() {
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -1146,9 +1140,9 @@ async fn facet_search_with_localized_attributes() {
#[actix_rt::test]
async fn swedish_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = json!([
{"id": "tra1-1", "product": "trä"},
{"id": "tra2-1", "product": "traktor"},
@ -1269,9 +1263,9 @@ async fn swedish_search() {
#[actix_rt::test]
async fn german_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = json!([
{"id": 1, "product": "Interkulturalität"},
{"id": 2, "product": "Wissensorganisation"},

View File

@ -2,11 +2,11 @@ use meili_snap::snapshot;
use once_cell::sync::Lazy;
use crate::common::index::Index;
use crate::common::{Server, Value};
use crate::common::{Server, Shared, Value};
use crate::json;
async fn index_with_documents<'a>(server: &'a Server, documents: &Value) -> Index<'a> {
let index = server.index("test");
async fn index_with_documents<'a>(server: &'a Server<Shared>, documents: &Value) -> Index<'a> {
let index = server.unique_index();
let (task, _status_code) = index.add_documents(documents.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@ -48,8 +48,8 @@ static SIMPLE_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn simple_search() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(json!({"q": "Captain Marvel", "matchingStrategy": "last", "attributesToRetrieve": ["id"]}), |response, code| {
@ -75,8 +75,8 @@ async fn simple_search() {
#[actix_rt::test]
async fn search_with_typo() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(json!({"q": "Capitain Marvel", "matchingStrategy": "last", "attributesToRetrieve": ["id"]}), |response, code| {
@ -102,8 +102,8 @@ async fn search_with_typo() {
#[actix_rt::test]
async fn search_with_unknown_word() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(json!({"q": "Captain Supercopter Marvel", "matchingStrategy": "last", "attributesToRetrieve": ["id"]}), |response, code| {

View File

@ -1,4 +1,4 @@
// This modules contains all the test concerning search. Each particular feature of the search
// This module contains all the tests concerning search. Each particular feature of the search
// should be tested in its own module to isolate tests and keep the tests readable.
mod distinct;
@ -17,12 +17,11 @@ mod restrict_searchable;
mod search_queue;
use meili_snap::{json_string, snapshot};
use meilisearch::Opt;
use tempfile::TempDir;
use crate::common::{
default_settings, shared_index_with_documents, shared_index_with_nested_documents, Server,
Value, DOCUMENTS, FRUITS_DOCUMENTS, NESTED_DOCUMENTS, SCORE_DOCUMENTS, VECTOR_DOCUMENTS,
shared_index_with_documents, shared_index_with_nested_documents,
shared_index_with_score_documents, Server, Value, DOCUMENTS, FRUITS_DOCUMENTS,
NESTED_DOCUMENTS, SCORE_DOCUMENTS, VECTOR_DOCUMENTS,
};
use crate::json;
@ -32,46 +31,33 @@ async fn test_settings_documents_indexing_swapping_and_search(
query: &Value,
test: impl Fn(Value, actix_http::StatusCode) + std::panic::UnwindSafe + Clone,
) {
let temp = TempDir::new().unwrap();
let server = Server::new_with_options(Opt { ..default_settings(temp.path()) }).await.unwrap();
let server = Server::new_shared();
eprintln!("Documents -> Settings -> test");
let index = server.index("test");
let index = server.unique_index();
let (task, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
assert_eq!(code, 202, "{task}");
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.update_settings(settings.clone()).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
assert_eq!(code, 202, "{task}");
index.wait_task(task.uid()).await.succeeded();
index.search(query.clone(), test.clone()).await;
let (task, code) = server.delete_index("test").await;
assert_eq!(code, 202, "{}", task);
let response = server.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
eprintln!("Settings -> Documents -> test");
let index = server.index("test");
let index = server.unique_index();
let (task, code) = index.update_settings(settings.clone()).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
assert_eq!(code, 202, "{task}");
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
assert_eq!(code, 202, "{task}");
index.wait_task(task.uid()).await.succeeded();
index.search(query.clone(), test.clone()).await;
let (task, code) = server.delete_index("test").await;
assert_eq!(code, 202, "{}", task);
let response = server.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
}
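For reference, a call site of this helper looks like the geo tests later in this diff; the settings and query below are illustrative (a fragment to be awaited inside an async test, not standalone code):

    test_settings_documents_indexing_swapping_and_search(
        &NESTED_DOCUMENTS,
        &json!({"filterableAttributes": ["doggos.age"]}),
        &json!({"filter": "doggos.age > 5"}),
        |response, code| {
            assert_eq!(code, 200, "{response}");
        },
    )
    .await;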
#[actix_rt::test]
@ -79,7 +65,7 @@ async fn simple_placeholder_search() {
let index = shared_index_with_documents().await;
index
.search(json!({}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
})
.await;
@ -87,7 +73,7 @@ async fn simple_placeholder_search() {
let index = shared_index_with_nested_documents().await;
index
.search(json!({}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 4);
})
.await;
@ -98,7 +84,7 @@ async fn simple_search() {
let index = shared_index_with_documents().await;
index
.search(json!({"q": "glass"}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
})
.await;
@ -106,7 +92,7 @@ async fn simple_search() {
let index = shared_index_with_nested_documents().await;
index
.search(json!({"q": "pésti"}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 2);
})
.await;
@ -115,8 +101,8 @@ async fn simple_search() {
/// See <https://github.com/meilisearch/meilisearch/issues/5547>
#[actix_rt::test]
async fn bug_5547() {
let server = Server::new().await;
let index = server.index("big_fst");
let server = Server::new_shared();
let index = server.unique_index();
let (response, _code) = index.create(None).await;
index.wait_task(response.uid()).await.succeeded();
@ -135,22 +121,22 @@ async fn bug_5547() {
#[actix_rt::test]
async fn search_with_stop_word() {
// related to https://github.com/meilisearch/meilisearch/issues/4984
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (_, code) = index
.update_settings(json!({"stopWords": ["the", "The", "a", "an", "to", "in", "of"]}))
.await;
meili_snap::snapshot!(code, @"202 Accepted");
snapshot!(code, @"202 Accepted");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let (task, _code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded();
// prefix search
index
.search(json!({"q": "to the", "attributesToHighlight": ["title"], "attributesToRetrieve": ["title"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @"[]");
})
.await;
@ -158,7 +144,7 @@ async fn search_with_stop_word() {
// non-prefix search
index
.search(json!({"q": "to the ", "attributesToHighlight": ["title"], "attributesToRetrieve": ["title"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -200,13 +186,13 @@ async fn search_with_stop_word() {
#[actix_rt::test]
async fn search_with_typo_settings() {
// related to https://github.com/meilisearch/meilisearch/issues/5240
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (_, code) = index
.update_settings(json!({"typoTolerance": { "disableOnAttributes": ["title", "id"]}}))
.await;
meili_snap::snapshot!(code, @"202 Accepted");
snapshot!(code, @"202 Accepted");
let documents = DOCUMENTS.clone();
let (task, _status_code) = index.add_documents(documents, None).await;
@ -214,7 +200,7 @@ async fn search_with_typo_settings() {
index
.search(json!({"q": "287947" }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -234,11 +220,11 @@ async fn search_with_typo_settings() {
#[actix_rt::test]
async fn phrase_search_with_stop_word() {
// related to https://github.com/meilisearch/meilisearch/issues/3521
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (_, code) = index.update_settings(json!({"stopWords": ["the", "of"]})).await;
meili_snap::snapshot!(code, @"202 Accepted");
snapshot!(code, @"202 Accepted");
let documents = DOCUMENTS.clone();
let (task, _status_code) = index.add_documents(documents, None).await;
@ -246,7 +232,7 @@ async fn phrase_search_with_stop_word() {
index
.search(json!({"q": "how \"to\" train \"the" }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
})
.await;
@ -257,7 +243,7 @@ async fn negative_phrase_search() {
let index = shared_index_with_documents().await;
index
.search(json!({"q": "-\"train your dragon\"" }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let hits = response["hits"].as_array().unwrap();
assert_eq!(hits.len(), 4);
assert_eq!(hits[0]["id"], "287947");
@ -273,7 +259,7 @@ async fn negative_word_search() {
let index = shared_index_with_documents().await;
index
.search(json!({"q": "-escape" }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let hits = response["hits"].as_array().unwrap();
assert_eq!(hits.len(), 4);
assert_eq!(hits[0]["id"], "287947");
@ -286,7 +272,7 @@ async fn negative_word_search() {
// Everything that contains derivatives of escape but not escape: nothing
index
.search(json!({"q": "-escape escape" }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let hits = response["hits"].as_array().unwrap();
assert_eq!(hits.len(), 0);
})
@ -298,7 +284,7 @@ async fn non_negative_search() {
let index = shared_index_with_documents().await;
index
.search(json!({"q": "- escape" }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let hits = response["hits"].as_array().unwrap();
assert_eq!(hits.len(), 1);
assert_eq!(hits[0]["id"], "522681");
@ -307,7 +293,7 @@ async fn non_negative_search() {
index
.search(json!({"q": "- \"train your dragon\"" }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let hits = response["hits"].as_array().unwrap();
assert_eq!(hits.len(), 1);
assert_eq!(hits[0]["id"], "166428");
@ -317,8 +303,8 @@ async fn non_negative_search() {
#[actix_rt::test]
async fn negative_special_cases_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _status_code) = index.add_documents(documents, None).await;
@ -331,7 +317,7 @@ async fn negative_special_cases_search() {
// There is a synonym for escape -> glass but we don't want "escape", only the derivatives: glass
index
.search(json!({"q": "-escape escape" }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let hits = response["hits"].as_array().unwrap();
assert_eq!(hits.len(), 1);
assert_eq!(hits[0]["id"], "450465");
@ -343,8 +329,8 @@ async fn negative_special_cases_search() {
#[cfg(not(feature = "chinese-pinyin"))]
#[actix_rt::test]
async fn test_kanji_language_detection() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = json!([
{ "id": 0, "title": "The quick (\"brown\") fox can't jump 32.3 feet, right? Brr, it's 29.3°F!" },
@ -356,7 +342,7 @@ async fn test_kanji_language_detection() {
index
.search(json!({"q": "東京"}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
})
.await;
@ -365,8 +351,8 @@ async fn test_kanji_language_detection() {
#[cfg(feature = "default")]
#[actix_rt::test]
async fn test_thai_language() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
// We don't need documents, the issue is on the query side only.
let documents = json!([
@ -382,7 +368,7 @@ async fn test_thai_language() {
index
.search(json!({"q": "สบู"}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
})
.await;
}
@ -400,7 +386,7 @@ async fn search_multiple_params() {
"offset": 0,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
},
)
@ -417,7 +403,7 @@ async fn search_multiple_params() {
"offset": 0,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 2);
},
)
@ -433,7 +419,7 @@ async fn search_with_sort_on_numbers() {
"sort": ["id:asc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
},
)
@ -446,7 +432,7 @@ async fn search_with_sort_on_numbers() {
"sort": ["doggos.age:asc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 4);
},
)
@ -462,7 +448,7 @@ async fn search_with_sort_on_strings() {
"sort": ["title:desc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
},
)
@ -475,7 +461,7 @@ async fn search_with_sort_on_strings() {
"sort": ["doggos.name:asc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 4);
},
)
@ -490,7 +476,7 @@ async fn search_with_multiple_sort() {
"sort": ["id:asc", "title:desc"]
}))
.await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
}
@ -503,7 +489,7 @@ async fn search_facet_distribution() {
"facets": ["title"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let dist = response["facetDistribution"].as_object().unwrap();
assert_eq!(dist.len(), 1);
assert!(dist.get("title").is_some());
@ -521,7 +507,7 @@ async fn search_facet_distribution() {
"facets": ["father"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let dist = response["facetDistribution"].as_object().unwrap();
assert_eq!(dist.len(), 1);
assert_eq!(
@ -544,9 +530,9 @@ async fn search_facet_distribution() {
"facets": ["doggos.name"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let dist = response["facetDistribution"].as_object().unwrap();
assert_eq!(dist.len(), 1, "{:?}", dist);
assert_eq!(dist.len(), 1, "{dist:?}");
assert_eq!(
dist["doggos.name"],
json!({ "bobby": 1, "buddy": 1, "gros bill": 1, "turbo": 1, "fast": 1})
@ -561,9 +547,9 @@ async fn search_facet_distribution() {
"facets": ["doggos"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let dist = response["facetDistribution"].as_object().unwrap();
assert_eq!(dist.len(), 3, "{:?}", dist);
assert_eq!(dist.len(), 3, "{dist:?}");
assert_eq!(
dist["doggos.name"],
json!({ "bobby": 1, "buddy": 1, "gros bill": 1, "turbo": 1, "fast": 1})
@ -579,7 +565,7 @@ async fn search_facet_distribution() {
"facets": ["doggos.name"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let dist = response["facetDistribution"].as_object().unwrap();
assert_eq!(dist.len(), 1);
assert_eq!(
@ -593,8 +579,8 @@ async fn search_facet_distribution() {
#[actix_rt::test]
async fn displayed_attributes() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index.update_settings(json!({ "displayedAttributes": ["title"] })).await;
@ -604,14 +590,14 @@ async fn displayed_attributes() {
let (response, code) =
index.search_post(json!({ "attributesToRetrieve": ["title", "id"] })).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert!(response["hits"][0].get("title").is_some());
}
#[actix_rt::test]
async fn placeholder_search_is_hard_limited() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents: Vec<_> = (0..1200).map(|i| json!({ "id": i, "text": "I am unique!" })).collect();
let (task, _status_code) = index.add_documents(documents.into(), None).await;
@ -623,7 +609,7 @@ async fn placeholder_search_is_hard_limited() {
"limit": 1500,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1000);
},
)
@ -636,7 +622,7 @@ async fn placeholder_search_is_hard_limited() {
"limit": 400,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 200);
},
)
@ -652,7 +638,7 @@ async fn placeholder_search_is_hard_limited() {
"limit": 1500,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1200);
},
)
@ -665,7 +651,7 @@ async fn placeholder_search_is_hard_limited() {
"limit": 400,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 200);
},
)
@ -674,8 +660,8 @@ async fn placeholder_search_is_hard_limited() {
#[actix_rt::test]
async fn search_is_hard_limited() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents: Vec<_> = (0..1200).map(|i| json!({ "id": i, "text": "I am unique!" })).collect();
let (task, _status_code) = index.add_documents(documents.into(), None).await;
@ -688,7 +674,7 @@ async fn search_is_hard_limited() {
"limit": 1500,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1000);
},
)
@ -702,7 +688,7 @@ async fn search_is_hard_limited() {
"limit": 400,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 200);
},
)
@ -719,7 +705,7 @@ async fn search_is_hard_limited() {
"limit": 1500,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1200);
},
)
@ -733,7 +719,7 @@ async fn search_is_hard_limited() {
"limit": 400,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 200);
},
)
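Both hard-limit tests follow the same arithmetic: the requested window [offset, offset + limit) is clipped to maxTotalHits (1000 by default) and to the number of matching documents. A self-contained sketch of the assumed rule, checked against the counts above:

    fn returned_hits(total_docs: usize, max_total_hits: usize, offset: usize, limit: usize) -> usize {
        let cap = max_total_hits.min(total_docs);
        cap.saturating_sub(offset).min(limit)
    }

    fn main() {
        assert_eq!(returned_hits(1200, 1000, 0, 1500), 1000); // capped by default maxTotalHits
        assert_eq!(returned_hits(1200, 1000, 800, 400), 200); // window clipped at the cap
        assert_eq!(returned_hits(1200, 1500, 0, 1500), 1200); // raising maxTotalHits exposes all docs
    }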
@ -742,8 +728,8 @@ async fn search_is_hard_limited() {
#[actix_rt::test]
async fn faceting_max_values_per_facet() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index.update_settings(json!({ "filterableAttributes": ["number"] })).await;
@ -757,7 +743,7 @@ async fn faceting_max_values_per_facet() {
"facets": ["number"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let numbers = response["facetDistribution"]["number"].as_object().unwrap();
assert_eq!(numbers.len(), 100);
},
@ -774,7 +760,7 @@ async fn faceting_max_values_per_facet() {
"facets": ["number"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let numbers = &response["facetDistribution"]["number"].as_object().unwrap();
assert_eq!(numbers.len(), 10_000);
},
@ -784,13 +770,7 @@ async fn faceting_max_values_per_facet() {
#[actix_rt::test]
async fn test_score_details() {
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
let res = index.add_documents(json!(documents), None).await;
index.wait_task(res.0.uid()).await.succeeded();
let index = shared_index_with_documents().await;
index
.search(
@ -799,8 +779,8 @@ async fn test_score_details() {
"showRankingScoreDetails": true,
}),
|response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "How to Train Your Dragon: The Hidden World",
@ -850,13 +830,7 @@ async fn test_score_details() {
#[actix_rt::test]
async fn test_score() {
let server = Server::new().await;
let index = server.index("test");
let documents = SCORE_DOCUMENTS.clone();
let res = index.add_documents(json!(documents), None).await;
index.wait_task(res.0.uid()).await.succeeded();
let index = shared_index_with_score_documents().await;
index
.search(
@ -865,8 +839,8 @@ async fn test_score() {
"showRankingScore": true,
}),
|response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "Batman the dark knight returns: Part 1",
@ -903,13 +877,7 @@ async fn test_score() {
#[actix_rt::test]
async fn test_score_threshold() {
let query = "Badman dark returns 1";
let server = Server::new().await;
let index = server.index("test");
let documents = SCORE_DOCUMENTS.clone();
let res = index.add_documents(json!(documents), None).await;
index.wait_task(res.0.uid()).await.succeeded();
let index = shared_index_with_score_documents().await;
index
.search(
@ -919,9 +887,9 @@ async fn test_score_threshold() {
"rankingScoreThreshold": 0.0
}),
|response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["estimatedTotalHits"]), @"5");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["estimatedTotalHits"]), @"5");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "Batman the dark knight returns: Part 1",
@ -962,9 +930,9 @@ async fn test_score_threshold() {
"rankingScoreThreshold": 0.2
}),
|response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["estimatedTotalHits"]), @r###"3"###);
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["estimatedTotalHits"]), @r###"3"###);
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "Batman the dark knight returns: Part 1",
@ -995,9 +963,9 @@ async fn test_score_threshold() {
"rankingScoreThreshold": 0.5
}),
|response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["estimatedTotalHits"]), @r###"2"###);
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["estimatedTotalHits"]), @r###"2"###);
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "Batman the dark knight returns: Part 1",
@ -1023,9 +991,9 @@ async fn test_score_threshold() {
"rankingScoreThreshold": 0.8
}),
|response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["estimatedTotalHits"]), @r###"1"###);
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["estimatedTotalHits"]), @r###"1"###);
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "Batman the dark knight returns: Part 1",
@ -1046,10 +1014,10 @@ async fn test_score_threshold() {
"rankingScoreThreshold": 1.0
}),
|response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["estimatedTotalHits"]), @r###"0"###);
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["estimatedTotalHits"]), @r###"0"###);
// nobody is perfect
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @"[]");
snapshot!(json_string!(response["hits"]), @"[]");
},
)
.await;
@ -1057,8 +1025,8 @@ async fn test_score_threshold() {
#[actix_rt::test]
async fn test_degraded_score_details() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = NESTED_DOCUMENTS.clone();
@ -1075,8 +1043,8 @@ async fn test_degraded_score_details() {
"showRankingScoreDetails": true,
}),
|response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response, { ".processingTimeMs" => "[duration]" }), @r###"
{
"hits": [
{
@ -1146,8 +1114,8 @@ async fn test_degraded_score_details() {
#[cfg(feature = "default")]
#[actix_rt::test]
async fn camelcased_words() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
// related to https://github.com/meilisearch/meilisearch/issues/3818
let documents = json!([
@ -1162,8 +1130,8 @@ async fn camelcased_words() {
index
.search(json!({"q": "deLonghi"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 0,
@ -1180,8 +1148,8 @@ async fn camelcased_words() {
index
.search(json!({"q": "dellonghi"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 0,
@ -1198,8 +1166,8 @@ async fn camelcased_words() {
index
.search(json!({"q": "testa"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 2,
@ -1220,8 +1188,8 @@ async fn camelcased_words() {
index
.search(json!({"q": "testab"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 2,
@ -1242,8 +1210,8 @@ async fn camelcased_words() {
index
.search(json!({"q": "TestaB"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 2,
@ -1264,8 +1232,8 @@ async fn camelcased_words() {
index
.search(json!({"q": "Testab"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 2,
@ -1286,8 +1254,8 @@ async fn camelcased_words() {
index
.search(json!({"q": "TestAb"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 2,
@ -1309,8 +1277,8 @@ async fn camelcased_words() {
// with Typos
index
.search(json!({"q": "dellonghi"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 0,
@ -1327,8 +1295,8 @@ async fn camelcased_words() {
index
.search(json!({"q": "TetsAB"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 2,
@ -1349,8 +1317,8 @@ async fn camelcased_words() {
index
.search(json!({"q": "TetsAB"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 2,
@ -1372,13 +1340,13 @@ async fn camelcased_words() {
#[actix_rt::test]
async fn simple_search_with_strange_synonyms() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) =
index.update_settings(json!({ "synonyms": {"&": ["to"], "to": ["&"]} })).await;
let r = index.wait_task(task.uid()).await;
meili_snap::snapshot!(r["status"], @r###""succeeded""###);
let r = index.wait_task(task.uid()).await.succeeded();
snapshot!(r["status"], @r###""succeeded""###);
let documents = DOCUMENTS.clone();
let (task, _status_code) = index.add_documents(documents, None).await;
@ -1386,8 +1354,8 @@ async fn simple_search_with_strange_synonyms() {
index
.search(json!({"q": "How to train"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "How to Train Your Dragon: The Hidden World",
@ -1404,8 +1372,8 @@ async fn simple_search_with_strange_synonyms() {
index
.search(json!({"q": "How & train"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "How to Train Your Dragon: The Hidden World",
@ -1422,8 +1390,8 @@ async fn simple_search_with_strange_synonyms() {
index
.search(json!({"q": "to"}), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"title": "How to Train Your Dragon: The Hidden World",
@ -1441,8 +1409,8 @@ async fn simple_search_with_strange_synonyms() {
#[actix_rt::test]
async fn change_attributes_settings() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index.update_settings(json!({ "searchableAttributes": ["father", "mother"] })).await;
@ -1462,8 +1430,8 @@ async fn change_attributes_settings() {
"attributesToRetrieve": ["id", "doggos"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 852,
@ -1493,8 +1461,8 @@ async fn change_attributes_settings() {
"attributesToRetrieve": ["id", "doggos"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
meili_snap::snapshot!(meili_snap::json_string!(response["hits"]), @r###"
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
"id": 852,
@ -1563,7 +1531,7 @@ async fn test_nested_fields() {
&settings,
&json!({"q": "document"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -1609,7 +1577,7 @@ async fn test_nested_fields() {
&settings,
&json!({"q": "zeroth"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -1627,7 +1595,7 @@ async fn test_nested_fields() {
&settings,
&json!({"q": "first"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -1650,7 +1618,7 @@ async fn test_nested_fields() {
&settings,
&json!({"q": "field"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -1686,7 +1654,7 @@ async fn test_nested_fields() {
&settings,
&json!({"q": "array"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
// nested is not searchable
snapshot!(json_string!(response["hits"]), @"[]");
},
@ -1698,7 +1666,7 @@ async fn test_nested_fields() {
&settings,
&json!({"q": "lied"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
// nested is not searchable
snapshot!(json_string!(response["hits"]), @"[]");
},
@ -1711,7 +1679,7 @@ async fn test_nested_fields() {
&settings,
&json!({"filter": "nested.object = field"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -1747,7 +1715,7 @@ async fn test_nested_fields() {
&settings,
&json!({"filter": "nested.machin = bidule"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -1770,10 +1738,10 @@ async fn test_nested_fields() {
&settings,
&json!({"filter": "nested = array"}),
|response, code| {
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
snapshot!(json_string!(response), @r###"
{
"message": "Index `test`: Attribute `nested` is not filterable. Available filterable attribute patterns are: `nested.machin`, `nested.object`, `title`.\n1:7 nested = array",
"message": "Index `[uuid]`: Attribute `nested` is not filterable. Available filterable attribute patterns are: `nested.machin`, `nested.object`, `title`.\n1:7 nested = array",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -1789,10 +1757,10 @@ async fn test_nested_fields() {
&settings,
&json!({"filter": r#"nested = "I lied""#}),
|response, code| {
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
snapshot!(json_string!(response), @r###"
{
"message": "Index `test`: Attribute `nested` is not filterable. Available filterable attribute patterns are: `nested.machin`, `nested.object`, `title`.\n1:7 nested = \"I lied\"",
"message": "Index `[uuid]`: Attribute `nested` is not filterable. Available filterable attribute patterns are: `nested.machin`, `nested.object`, `title`.\n1:7 nested = \"I lied\"",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@ -1850,7 +1818,7 @@ async fn test_typo_settings() {
}),
&json!({"q": "document"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -1902,7 +1870,7 @@ async fn test_typo_settings() {
}),
&json!({"q": "docume"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["hits"]), @r###"
[
{
@ -1946,8 +1914,8 @@ async fn test_typo_settings() {
/// Modifying facets with different casing should work correctly
#[actix_rt::test]
async fn change_facet_casing() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -1955,7 +1923,7 @@ async fn change_facet_casing() {
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
.add_documents(
@ -1968,7 +1936,7 @@ async fn change_facet_casing() {
None,
)
.await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
.add_documents(
@ -1981,12 +1949,12 @@ async fn change_facet_casing() {
None,
)
.await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
index
.search(json!({ "facets": ["dog"] }), |response, code| {
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(meili_snap::json_string!(response["facetDistribution"]), @r###"
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["facetDistribution"]), @r###"
{
"dog": {
"bouvier bernois": 1

File diff suppressed because it is too large.

@ -2296,6 +2296,7 @@ async fn error_remote_500_once() {
}
#[actix_rt::test]
#[ignore]
async fn error_remote_timeout() {
let ms0 = Server::new().await;
let ms1 = Server::new().await;


@ -7,7 +7,7 @@ async fn default_search_should_return_estimated_total_hit() {
let index = shared_index_with_documents().await;
index
.search(json!({}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert!(response.get("estimatedTotalHits").is_some());
assert!(response.get("limit").is_some());
assert!(response.get("offset").is_some());
@ -25,7 +25,7 @@ async fn simple_search() {
let index = shared_index_with_documents().await;
index
.search(json!({"page": 1}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
assert!(response.get("totalHits").is_some());
assert_eq!(response["page"], 1);
@ -44,7 +44,7 @@ async fn page_zero_should_not_return_any_result() {
let index = shared_index_with_documents().await;
index
.search(json!({"page": 0}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 0);
assert!(response.get("totalHits").is_some());
assert_eq!(response["page"], 0);
@ -58,7 +58,7 @@ async fn hits_per_page_1() {
let index = shared_index_with_documents().await;
index
.search(json!({"hitsPerPage": 1}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
assert_eq!(response["totalHits"], 5);
assert_eq!(response["page"], 1);
@ -72,7 +72,7 @@ async fn hits_per_page_0_should_not_return_any_result() {
let index = shared_index_with_documents().await;
index
.search(json!({"hitsPerPage": 0}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 0);
assert_eq!(response["totalHits"], 5);
assert_eq!(response["page"], 1);
@ -126,7 +126,7 @@ async fn ensure_placeholder_search_hit_count_valid() {
for page in 0..=4 {
index
.search(json!({"page": page, "hitsPerPage": 1}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["totalHits"], 4);
assert_eq!(response["totalPages"], 4);
})


@ -2,11 +2,11 @@ use meili_snap::{json_string, snapshot};
use once_cell::sync::Lazy;
use crate::common::index::Index;
use crate::common::{Server, Value};
use crate::common::{Server, Shared, Value};
use crate::json;
async fn index_with_documents<'a>(server: &'a Server, documents: &Value) -> Index<'a> {
let index = server.index("test");
async fn index_with_documents<'a>(server: &'a Server<Shared>, documents: &Value) -> Index<'a> {
let index = server.unique_index();
let (task, _code) = index.add_documents(documents.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
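The new `&'a Server<Shared>` parameter explains why the call sites below pass `server` rather than `&server`: `new_shared()` evidently hands out a reference to a process-wide instance. A sketch of that shape; the marker type and constructor are assumptions inferred from these call sites, not the harness's actual code:

use std::marker::PhantomData;
use std::sync::OnceLock;

struct Shared; // assumed marker: tags a server whose global state tests must not rely on

struct Server<State> {
    base_url: String,
    _state: PhantomData<State>,
}

impl Server<Shared> {
    // Assumed: lazily initialize one instance and return it by 'static reference,
    // so `let server = Server::new_shared();` already binds a reference.
    fn new_shared() -> &'static Server<Shared> {
        static INSTANCE: OnceLock<Server<Shared>> = OnceLock::new();
        INSTANCE.get_or_init(|| Server {
            base_url: "http://localhost:7700".into(),
            _state: PhantomData,
        })
    }
}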
@ -34,8 +34,8 @@ static SIMPLE_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn simple_search_on_title() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// simple search should return 2 documents (ids: 2 and 3).
index
@ -51,8 +51,8 @@ async fn simple_search_on_title() {
#[actix_rt::test]
async fn search_no_searchable_attribute_set() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(
@ -93,8 +93,8 @@ async fn search_no_searchable_attribute_set() {
#[actix_rt::test]
async fn search_on_all_attributes() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(json!({"q": "Captain Marvel", "attributesToSearchOn": ["*"]}), |response, code| {
@ -106,8 +106,8 @@ async fn search_on_all_attributes() {
#[actix_rt::test]
async fn search_on_all_attributes_restricted_set() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
let (task, _status_code) = index.update_settings_searchable_attributes(json!(["title"])).await;
index.wait_task(task.uid()).await.succeeded();
@ -121,8 +121,8 @@ async fn search_on_all_attributes_restricted_set() {
#[actix_rt::test]
async fn simple_prefix_search_on_title() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// simple search should return 2 documents (ids: 2 and 3).
index
@ -135,8 +135,8 @@ async fn simple_prefix_search_on_title() {
#[actix_rt::test]
async fn simple_search_on_title_matching_strategy_all() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// simple search matching strategy all should only return 1 document (id: 2).
index
.search(json!({"q": "Captain Marvel", "attributesToSearchOn": ["title"], "matchingStrategy": "all"}), |response, code| {
@ -148,8 +148,8 @@ async fn simple_search_on_title_matching_strategy_all() {
#[actix_rt::test]
async fn simple_search_on_no_field() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// simple search on no field shouldn't return any document.
index
.search(json!({"q": "Captain Marvel", "attributesToSearchOn": []}), |response, code| {
@ -161,8 +161,8 @@ async fn simple_search_on_no_field() {
#[actix_rt::test]
async fn word_ranking_rule_order() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// Document 3 should appear before document 2.
index
@ -189,8 +189,8 @@ async fn word_ranking_rule_order() {
#[actix_rt::test]
async fn word_ranking_rule_order_exact_words() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
let (task, _status_code) = index
.update_settings_typo_tolerance(json!({"disableOnWords": ["Captain", "Marvel"]}))
.await;
@ -221,9 +221,9 @@ async fn word_ranking_rule_order_exact_words() {
#[actix_rt::test]
async fn typo_ranking_rule_order() {
let server = Server::new().await;
let server = Server::new_shared();
let index = index_with_documents(
&server,
server,
&json!([
{
"title": "Capitain Marivel",
@ -260,9 +260,9 @@ async fn typo_ranking_rule_order() {
#[actix_rt::test]
async fn attributes_ranking_rule_order() {
let server = Server::new().await;
let server = Server::new_shared();
let index = index_with_documents(
&server,
server,
&json!([
{
"title": "Captain Marvel",
@ -301,9 +301,9 @@ async fn attributes_ranking_rule_order() {
#[actix_rt::test]
async fn exactness_ranking_rule_order() {
let server = Server::new().await;
let server = Server::new_shared();
let index = index_with_documents(
&server,
server,
&json!([
{
"title": "Captain Marvel",
@ -340,9 +340,9 @@ async fn exactness_ranking_rule_order() {
#[actix_rt::test]
async fn search_on_exact_field() {
let server = Server::new().await;
let server = Server::new_shared();
let index = index_with_documents(
&server,
server,
&json!([
{
"title": "Captain Marvel",
@ -359,7 +359,7 @@ async fn search_on_exact_field() {
let (response, code) =
index.update_settings_typo_tolerance(json!({ "disableOnAttributes": ["exact"] })).await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
// Searching on an exact attribute should only return the document matching without typo.
index
@ -372,7 +372,7 @@ async fn search_on_exact_field() {
#[actix_rt::test]
async fn phrase_search_on_title() {
let server = Server::new().await;
let server = Server::new_shared();
let documents = json!([
{ "id": 8, "desc": "Document Review", "title": "Document Review Specialist II" },
{ "id": 5, "desc": "Document Review", "title": "Document Review Attorney" },
@ -383,7 +383,7 @@ async fn phrase_search_on_title() {
{ "id": 7, "desc": "Document Review", "title": "Document Review Specialist II" },
{ "id": 6, "desc": "Document Review", "title": "Document Review (Entry Level)" }
]);
let index = index_with_documents(&server, &documents).await;
let index = index_with_documents(server, &documents).await;
index
.search(
@ -416,3 +416,381 @@ async fn phrase_search_on_title() {
)
.await;
}
static NESTED_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
"details": {
"title": "Shazam!",
"desc": "a Captain Marvel ersatz",
"weaknesses": ["magic", "requires transformation"],
"outfit": {
"has_cape": true,
"colors": {
"primary": "red",
"secondary": "gold"
}
}
},
"id": "1",
},
{
"details": {
"title": "Captain Planet",
"desc": "He's not part of the Marvel Cinematic Universe",
"blue_skin": true,
"outfit": {
"has_cape": false
}
},
"id": "2",
},
{
"details": {
"title": "Captain Marvel",
"desc": "a Shazam ersatz",
"weaknesses": ["magic", "power instability"],
"outfit": {
"has_cape": false
}
},
"id": "3",
}])
});
#[actix_rt::test]
async fn nested_search_on_title_with_prefix_wildcard() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// The '*.title' wildcard should expand to the nested 'details.title' attribute
index
.search(
json!({"q": "Captain Marvel", "attributesToSearchOn": ["*.title"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "3"
},
{
"id": "2"
}
]"###);
},
)
.await;
}
#[actix_rt::test]
async fn nested_search_with_suffix_wildcard() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// The 'details.*' wildcard should expand to any attribute nested inside 'details'
// It's worth noting the difference between 'details.*' and '*.title'
index
.search(
json!({"q": "Captain Marvel", "attributesToSearchOn": ["details.*"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "3"
},
{
"id": "1"
},
{
"id": "2"
}
]"###);
},
)
.await;
// Should return 1 document (id: 1)
index
.search(
json!({"q": "gold", "attributesToSearchOn": ["details.*"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "1"
}
]"###);
},
)
.await;
// Should return 2 documents (ids: 1 and 2)
index
.search(
json!({"q": "true", "attributesToSearchOn": ["details.*"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "1"
},
{
"id": "2"
}
]"###);
},
)
.await;
}
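As the comments in the two tests above point out, the wildcard's position changes which attributes are searched: `*.title` expands to paths whose last segment is `title` (only `details.title` in these fixtures), while `details.*` expands to every leaf nested under `details`, which is why "gold" (`details.outfit.colors.secondary`) and "true" (`details.outfit.has_cape`) only match the suffix form. An illustrative matcher that mirrors the behavior observed in these assertions; the real expansion lives in the engine:

// Illustrative matcher over dot-separated attribute paths.
fn wildcard_matches(pattern: &str, attribute: &str) -> bool {
    if let Some(suffix) = pattern.strip_prefix("*.") {
        attribute == suffix || attribute.ends_with(&format!(".{suffix}"))
    } else if let Some(prefix) = pattern.strip_suffix(".*") {
        attribute.starts_with(&format!("{prefix}."))
    } else {
        pattern == attribute
    }
}

fn main() {
    assert!(wildcard_matches("*.title", "details.title"));
    assert!(!wildcard_matches("*.title", "details.outfit.has_cape"));
    assert!(wildcard_matches("details.*", "details.outfit.colors.secondary"));
    assert!(!wildcard_matches("details.*", "id"));
}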
#[actix_rt::test]
async fn nested_search_on_title_restricted_set_with_suffix_wildcard() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
let (task, _status_code) =
index.update_settings_searchable_attributes(json!(["details.title"])).await;
index.wait_task(task.uid()).await.succeeded();
index
.search(
json!({"q": "Captain Marvel", "attributesToSearchOn": ["details.*"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "3"
},
{
"id": "2"
}
]"###);
},
)
.await;
}
#[actix_rt::test]
async fn nested_search_no_searchable_attribute_set_with_any_wildcard() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
index
.search(
json!({"q": "Captain Marvel", "attributesToSearchOn": ["unknown.*", "*.unknown"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(response["hits"].as_array().unwrap().len(), @"0");
},
)
.await;
let (task, _status_code) = index.update_settings_searchable_attributes(json!(["*"])).await;
index.wait_task(task.uid()).await.succeeded();
index
.search(
json!({"q": "Captain Marvel", "attributesToSearchOn": ["unknown.*", "*.unknown"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(response["hits"].as_array().unwrap().len(), @"0");
},
)
.await;
let (task, _status_code) = index.update_settings_searchable_attributes(json!(["*"])).await;
index.wait_task(task.uid()).await.succeeded();
index
.search(
json!({"q": "Captain Marvel", "attributesToSearchOn": ["unknown.*", "*.unknown", "*.title"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "3"
},
{
"id": "2"
}
]"###);
},
)
.await;
}
#[actix_rt::test]
async fn nested_prefix_search_on_title_with_prefix_wildcard() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Nested prefix search with prefix wildcard should return 2 documents (ids: 2 and 3).
index
.search(
json!({"q": "Captain Mar", "attributesToSearchOn": ["*.title"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "3"
},
{
"id": "2"
}
]"###);
},
)
.await;
}
#[actix_rt::test]
async fn nested_prefix_search_on_details_with_suffix_wildcard() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
index
.search(
json!({"q": "Captain Mar", "attributesToSearchOn": ["details.*"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "3"
},
{
"id": "1"
},
{
"id": "2"
}
]"###);
},
)
.await;
}
#[actix_rt::test]
async fn nested_prefix_search_on_weaknesses_with_suffix_wildcard() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Wildcard search on nested weaknesses should return 2 documents (ids: 1 and 3)
index
.search(
json!({"q": "mag", "attributesToSearchOn": ["details.*"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "1"
},
{
"id": "3"
}
]"###);
},
)
.await;
}
#[actix_rt::test]
async fn nested_search_on_title_matching_strategy_all() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Nested search matching strategy all should only return 1 document (id: 3)
index
.search(
json!({"q": "Captain Marvel", "attributesToSearchOn": ["*.title"], "matchingStrategy": "all", "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "3"
}
]"###);
},
)
.await;
}
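The strategy is what narrows the result here: only document 3 has both "Captain" and "Marvel" in a `title` field, so `matchingStrategy: "all"` excludes "Captain Planet", while the default strategy (`last`, which drops trailing query terms when too few documents match every term) also returned id 2 in the earlier prefix-wildcard test. Illustrative side-by-side queries over the same fixtures:

// "all": every term must match -> only id 3.
let strict = json!({"q": "Captain Marvel", "attributesToSearchOn": ["*.title"], "matchingStrategy": "all"});
// Default "last": "Marvel" may be dropped -> ids 3 and 2, as in the earlier test.
let relaxed = json!({"q": "Captain Marvel", "attributesToSearchOn": ["*.title"], "matchingStrategy": "last"});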
#[actix_rt::test]
async fn nested_attributes_ranking_rule_order_with_prefix_wildcard() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Document 3 should appear before documents 1 and 2
index
.search(
json!({"q": "Captain Marvel", "attributesToSearchOn": ["*.desc", "*.title"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "3"
},
{
"id": "1"
},
{
"id": "2"
}
]
"###
);
},
)
.await;
}
#[actix_rt::test]
async fn nested_attributes_ranking_rule_order_with_suffix_wildcard() {
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Document 3 should appear before documents 1 and 2
index
.search(
json!({"q": "Captain Marvel", "attributesToSearchOn": ["details.*"], "attributesToRetrieve": ["id"]}),
|response, code| {
snapshot!(code, @"200 OK");
snapshot!(json_string!(response["hits"]),
@r###"
[
{
"id": "3"
},
{
"id": "1"
},
{
"id": "2"
}
]
"###
);
},
)
.await;
}


@ -4,7 +4,7 @@ source: crates/meilisearch/tests/search/distinct.rs
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "tamo",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,


@ -3,8 +3,8 @@ use crate::json;
#[actix_rt::test]
async fn set_and_reset_distinct_attribute() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task1, _code) = index.update_settings(json!({ "distinctAttribute": "test"})).await;
index.wait_task(task1.uid()).await.succeeded();
@ -24,8 +24,8 @@ async fn set_and_reset_distinct_attribute() {
#[actix_rt::test]
async fn set_and_reset_distinct_attribute_with_dedicated_route() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (update_task1, _code) = index.update_distinct_attribute(json!("test")).await;
index.wait_task(update_task1.uid()).await.succeeded();


@ -11,59 +11,62 @@ macro_rules! test_setting_routes {
#[actix_rt::test]
async fn get_unexisting_index() {
let server = Server::new().await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (_response, code) = server.service.get(url).await;
assert_eq!(code, 404);
}
#[actix_rt::test]
async fn update_unexisting_index() {
let server = Server::new().await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.$update_verb(url, serde_json::Value::Null.into()).await;
assert_eq!(code, 202, "{}", response);
server.index("").wait_task(0).await;
let (response, code) = server.index("test").get().await;
assert_eq!(code, 200, "{}", response);
}
#[actix_rt::test]
async fn delete_unexisting_index() {
let server = Server::new().await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (_, code) = server.service.delete(url).await;
assert_eq!(code, 202);
let response = server.index("").wait_task(0).await;
assert_eq!(response["status"], "failed");
}
#[actix_rt::test]
async fn get_default() {
let server = Server::new().await;
let index = server.index("test");
let (response, code) = index.create(None).await;
assert_eq!(code, 202, "{}", response);
index.wait_task(0).await;
let url = format!("/indexes/test/settings/{}",
let server = Server::new_shared();
let index_name = uuid::Uuid::new_v4().to_string();
let url = format!("/indexes/{index_name}/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.get(url).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 404, "{response}");
}
#[actix_rt::test]
async fn update_unexisting_index() {
let server = Server::new_shared();
let index_name = uuid::Uuid::new_v4().to_string();
let url = format!("/indexes/{index_name}/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.$update_verb(url, serde_json::Value::Null.into()).await;
assert_eq!(code, 202, "{response}");
let (response, code) = server.service.get(format!("/indixes/{index_name}")).await;
assert_eq!(code, 404, "{response}");
}
#[actix_rt::test]
async fn delete_unexisting_index() {
let server = Server::new_shared();
let index_name = uuid::Uuid::new_v4().to_string();
let url = format!("/indexes/{index_name}/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.delete(url).await;
assert_eq!(code, 202, "{response}");
let (response, code) = server.service.get(format!("/indixes/{index_name}")).await;
assert_eq!(code, 404, "{response}");
}
#[actix_rt::test]
async fn get_default() {
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index.create(None).await;
assert_eq!(code, 202, "{response}");
index.wait_task(response.uid()).await.succeeded();
let url = format!("/indexes/{}/settings/{}",
index.uid,
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.get(url).await;
assert_eq!(code, 200, "{response}");
let expected = crate::json!($default_value);
assert_eq!(expected, response);
}
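Each generated test derives its URL from the setting's Rust identifier via the character map shown above (underscores become hyphens). The same conversion restated as a standalone function, with `ranking_rules` as the example identifier:

// Mirrors the macro's stringify!-plus-map conversion as a plain function.
fn setting_route(index_uid: &str, setting: &str) -> String {
    let segment: String = setting.chars().map(|c| if c == '_' { '-' } else { c }).collect();
    format!("/indexes/{index_uid}/settings/{segment}")
}

fn main() {
    assert_eq!(
        setting_route("movies", "ranking_rules"),
        "/indexes/movies/settings/ranking-rules"
    );
}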
@ -195,15 +198,16 @@ test_setting_routes!(
#[actix_rt::test]
async fn get_settings_unexisting_index() {
let server = Server::new().await;
let (response, code) = server.index("test").settings().await;
assert_eq!(code, 404, "{}", response)
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index.settings().await;
assert_eq!(code, 404, "{response}")
}
#[actix_rt::test]
async fn get_settings() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, _code) = index.create(None).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.settings().await;
@ -247,9 +251,8 @@ async fn get_settings() {
#[actix_rt::test]
async fn secrets_are_hidden_in_settings() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, _code) = index.create(None).await;
index.wait_task(response.uid()).await.succeeded();
@ -269,11 +272,11 @@ async fn secrets_are_hidden_in_settings() {
.await;
meili_snap::snapshot!(code, @"202 Accepted");
meili_snap::snapshot!(meili_snap::json_string!(response, { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
meili_snap::snapshot!(meili_snap::json_string!(response, { ".taskUid" => "[task_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r###"
{
"taskUid": 1,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -282,7 +285,7 @@ async fn secrets_are_hidden_in_settings() {
let settings_update_uid = response.uid();
index.wait_task(settings_update_uid).await;
index.wait_task(settings_update_uid).await.succeeded();
let (response, code) = index.settings().await;
meili_snap::snapshot!(code, @"200 OK");
@ -370,16 +373,16 @@ async fn secrets_are_hidden_in_settings() {
#[actix_rt::test]
async fn error_update_settings_unknown_field() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (_response, code) = index.update_settings(json!({"foo": 12})).await;
assert_eq!(code, 400);
}
#[actix_rt::test]
async fn test_partial_update() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.update_settings(json!({"displayedAttributes": ["foo"]})).await;
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.settings().await;
@ -398,20 +401,18 @@ async fn test_partial_update() {
#[actix_rt::test]
async fn error_delete_settings_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.delete_settings().await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "failed");
index.wait_task(task.uid()).await.failed();
}
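This hunk shows the diff's other recurring replacement: instead of capturing the waited task and asserting on its `status` field, tests chain the `succeeded()`/`failed()` helpers onto the awaited result, which, as used throughout this diff, assert the expected terminal status. A sketch of the before/after, assuming only what the call sites show:

// Before: two steps, and forgetting the assert lets a failed task slip through silently.
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "failed");

// After: the expectation is part of the wait itself.
index.wait_task(task.uid()).await.failed();     // asserts the task ended as "failed"
index.wait_task(task2.uid()).await.succeeded(); // asserts the task ended as "succeeded"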
#[actix_rt::test]
async fn reset_all_settings() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = json!([
{
@ -423,7 +424,6 @@ async fn reset_all_settings() {
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
assert_eq!(response["taskUid"], 0);
index.wait_task(response.uid()).await.succeeded();
let (update_task,_status_code) = index
@ -456,17 +456,15 @@ async fn reset_all_settings() {
#[actix_rt::test]
async fn update_setting_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.update_settings(json!({})).await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task.uid()).await.succeeded();
let (_response, code) = index.get().await;
assert_eq!(code, 200);
let (task, _status_code) = index.delete_settings().await;
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task.uid()).await.succeeded();
}
#[actix_rt::test]
@ -487,8 +485,8 @@ async fn error_update_setting_unexisting_index_invalid_uid() {
#[actix_rt::test]
async fn error_set_invalid_ranking_rules() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index.create(None).await;
let (response, code) = index.update_settings(json!({ "rankingRules": [ "manyTheFish"]})).await;
@ -505,8 +503,8 @@ async fn error_set_invalid_ranking_rules() {
#[actix_rt::test]
async fn set_and_reset_distinct_attribute_with_dedicated_route() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.update_distinct_attribute(json!("test")).await;
index.wait_task(task.uid()).await.succeeded();
@ -526,8 +524,8 @@ async fn set_and_reset_distinct_attribute_with_dedicated_route() {
#[actix_rt::test]
async fn granular_filterable_attributes() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index.create(None).await;
let (response, code) =
@ -545,7 +543,7 @@ async fn granular_filterable_attributes() {
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.settings().await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["filterableAttributes"]), @r###"
[
{


@ -26,8 +26,8 @@ static DOCUMENTS: Lazy<crate::common::Value> = Lazy::new(|| {
#[actix_rt::test]
async fn attribute_scale_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@ -38,7 +38,7 @@ async fn attribute_scale_search() {
"rankingRules": ["words", "typo", "proximity"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
// the expected order is [1, 3, 2] instead of [3, 1, 2]
@ -99,8 +99,8 @@ async fn attribute_scale_search() {
#[actix_rt::test]
async fn attribute_scale_phrase_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@ -167,8 +167,8 @@ async fn attribute_scale_phrase_search() {
#[actix_rt::test]
async fn word_scale_set_and_reset() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@ -282,8 +282,8 @@ async fn word_scale_set_and_reset() {
#[actix_rt::test]
async fn attribute_scale_default_ranking_rules() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@ -293,7 +293,7 @@ async fn attribute_scale_default_ranking_rules() {
"proximityPrecision": "byAttribute"
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
// the expected order is [3, 1, 2]


@ -5,8 +5,8 @@ use crate::json;
#[actix_rt::test]
async fn set_and_reset() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index
.update_settings(json!({
@ -70,8 +70,8 @@ async fn set_and_search() {
},
]);
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (add_task, _status_code) = index.add_documents(documents, None).await;
index.wait_task(add_task.uid()).await.succeeded();
@ -224,8 +224,8 @@ async fn advanced_synergies() {
},
]);
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (add_task, _status_code) = index.add_documents(documents, None).await;
index.wait_task(add_task.uid()).await.succeeded();


@ -6,11 +6,11 @@ use crate::json;
#[actix_rt::test]
async fn similar_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let expected_response = json!({
"message": "Index `test` not found.",
"message": format!("Index `{}` not found.", index.uid),
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -26,12 +26,12 @@ async fn similar_unexisting_index() {
#[actix_rt::test]
async fn similar_unexisting_parameter() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index
.similar(json!({"id": 287947, "marin": "hello"}), |response, code| {
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
assert_eq!(response["code"], "bad_request");
})
.await;
@ -39,8 +39,8 @@ async fn similar_unexisting_parameter() {
#[actix_rt::test]
async fn similar_bad_id() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -53,7 +53,7 @@ async fn similar_bad_id() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) = index.similar_post(json!({"id": ["doggo"], "embedder": "manual"})).await;
snapshot!(code, @"400 Bad Request");
@ -69,8 +69,8 @@ async fn similar_bad_id() {
#[actix_rt::test]
async fn similar_bad_ranking_score_threshold() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -83,7 +83,7 @@ async fn similar_bad_ranking_score_threshold() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) = index.similar_post(json!({"rankingScoreThreshold": ["doggo"]})).await;
snapshot!(code, @"400 Bad Request");
@ -99,8 +99,8 @@ async fn similar_bad_ranking_score_threshold() {
#[actix_rt::test]
async fn similar_invalid_ranking_score_threshold() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -113,7 +113,7 @@ async fn similar_invalid_ranking_score_threshold() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) = index.similar_post(json!({"rankingScoreThreshold": 42})).await;
snapshot!(code, @"400 Bad Request");
@ -129,8 +129,8 @@ async fn similar_invalid_ranking_score_threshold() {
#[actix_rt::test]
async fn similar_invalid_id() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -143,7 +143,7 @@ async fn similar_invalid_id() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.similar_post(json!({"id": "http://invalid-docid/", "embedder": "manual"})).await;
@ -160,8 +160,8 @@ async fn similar_invalid_id() {
#[actix_rt::test]
async fn similar_not_found_id() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -174,7 +174,7 @@ async fn similar_not_found_id() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.similar_post(json!({"id": "definitely-doesnt-exist", "embedder": "manual"})).await;
@ -191,8 +191,8 @@ async fn similar_not_found_id() {
#[actix_rt::test]
async fn similar_bad_offset() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -205,7 +205,7 @@ async fn similar_bad_offset() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.similar_post(json!({"id": 287947, "offset": "doggo", "embedder": "manual"})).await;
@ -233,8 +233,8 @@ async fn similar_bad_offset() {
#[actix_rt::test]
async fn similar_bad_limit() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -247,7 +247,7 @@ async fn similar_bad_limit() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.similar_post(json!({"id": 287947, "limit": "doggo", "embedder": "manual"})).await;
@ -277,8 +277,8 @@ async fn similar_bad_limit() {
async fn similar_bad_filter() {
// Since a filter is deserialized as a json Value, it will never fail to deserialize.
// Thus the error message is not generated by deserr but written by us.
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -291,7 +291,7 @@ async fn similar_bad_filter() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
snapshot!(code, @"202 Accepted");
@ -316,8 +316,8 @@ async fn similar_bad_filter() {
#[actix_rt::test]
async fn filter_invalid_syntax_object() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -330,7 +330,7 @@ async fn filter_invalid_syntax_object() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -354,8 +354,8 @@ async fn filter_invalid_syntax_object() {
#[actix_rt::test]
async fn filter_invalid_syntax_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -368,7 +368,7 @@ async fn filter_invalid_syntax_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -392,8 +392,8 @@ async fn filter_invalid_syntax_array() {
#[actix_rt::test]
async fn filter_invalid_syntax_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -406,7 +406,7 @@ async fn filter_invalid_syntax_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -432,8 +432,8 @@ async fn filter_invalid_syntax_string() {
#[actix_rt::test]
async fn filter_invalid_attribute_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -446,7 +446,7 @@ async fn filter_invalid_attribute_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -473,8 +473,8 @@ async fn filter_invalid_attribute_array() {
#[actix_rt::test]
async fn filter_invalid_attribute_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -487,7 +487,7 @@ async fn filter_invalid_attribute_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -514,8 +514,8 @@ async fn filter_invalid_attribute_string() {
#[actix_rt::test]
async fn filter_reserved_geo_attribute_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -528,7 +528,7 @@ async fn filter_reserved_geo_attribute_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -554,8 +554,8 @@ async fn filter_reserved_geo_attribute_array() {
#[actix_rt::test]
async fn filter_reserved_geo_attribute_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -568,7 +568,7 @@ async fn filter_reserved_geo_attribute_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -594,8 +594,8 @@ async fn filter_reserved_geo_attribute_string() {
#[actix_rt::test]
async fn filter_reserved_attribute_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -608,7 +608,7 @@ async fn filter_reserved_attribute_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -634,8 +634,8 @@ async fn filter_reserved_attribute_array() {
#[actix_rt::test]
async fn filter_reserved_attribute_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -648,7 +648,7 @@ async fn filter_reserved_attribute_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -674,8 +674,8 @@ async fn filter_reserved_attribute_string() {
#[actix_rt::test]
async fn filter_reserved_geo_point_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -688,7 +688,7 @@ async fn filter_reserved_geo_point_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -714,8 +714,8 @@ async fn filter_reserved_geo_point_array() {
#[actix_rt::test]
async fn filter_reserved_geo_point_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -728,7 +728,7 @@ async fn filter_reserved_geo_point_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -754,8 +754,8 @@ async fn filter_reserved_geo_point_string() {
#[actix_rt::test]
async fn similar_bad_retrieve_vectors() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) =
index.similar_post(json!({"retrieveVectors": "doggo", "embedder": "manual"})).await;
@ -806,8 +806,8 @@ async fn similar_bad_retrieve_vectors() {
#[actix_rt::test]
async fn similar_bad_embedder() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -820,7 +820,7 @@ async fn similar_bad_embedder() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
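
Every error test in this file now follows the same migration: one server process shared by the whole suite, with per-test isolation coming from a uniquely named index instead of a fresh server. A minimal sketch of the resulting shape, assuming only the harness helpers visible in the diff (Server::new_shared, unique_index, wait_task); the settings payload mirrors the hypothetical one the tests above use:

#[actix_rt::test]
async fn some_error_case() {
    // The server is shared across tests; the unique index isolates this test's data.
    let server = Server::new_shared();
    let index = server.unique_index();

    let (response, code) = index
        .update_settings(json!({ "filterableAttributes": ["title"] }))
        .await;
    snapshot!(code, @"202 Accepted");
    // wait_task polls until the task finishes; .succeeded() additionally asserts it did not fail.
    server.wait_task(response.uid()).await.succeeded();
}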

View File

@ -47,8 +47,8 @@ static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn basic() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index_with_prefix("test");
let (response, code) = index
.update_settings(json!({
@ -61,12 +61,12 @@ async fn basic() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
index.wait_task(value.uid()).await.succeeded();
server.wait_task(value.uid()).await.succeeded();
index
.similar(
@ -233,8 +233,8 @@ async fn basic() {
#[actix_rt::test]
async fn ranking_score_threshold() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index_with_prefix("test");
let (response, code) = index
.update_settings(json!({
@ -247,12 +247,12 @@ async fn ranking_score_threshold() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
index.wait_task(value.uid()).await.succeeded();
server.wait_task(value.uid()).await.succeeded();
index
.similar(
@ -503,8 +503,8 @@ async fn ranking_score_threshold() {
#[actix_rt::test]
async fn filter() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index_with_prefix("test");
let (response, code) = index
.update_settings(json!({
@ -517,12 +517,12 @@ async fn filter() {
"filterableAttributes": ["title", "release_year"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
index.wait_task(value.uid()).await.succeeded();
server.wait_task(value.uid()).await.succeeded();
index
.similar(
@ -621,8 +621,8 @@ async fn filter() {
#[actix_rt::test]
async fn limit_and_offset() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index_with_prefix("test");
let (response, code) = index
.update_settings(json!({
@ -635,12 +635,12 @@ async fn limit_and_offset() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
snapshot!(code, @"202 Accepted");
index.wait_task(value.uid()).await.succeeded();
server.wait_task(value.uid()).await.succeeded();
index
.similar(

View File

@ -6,8 +6,8 @@ use crate::common::Server;
use crate::json;
#[actix_rt::test]
async fn get_settings_unexisting_index() {
let server = Server::new().await;
async fn get_version() {
let server = Server::new_shared();
let (response, code) = server.version().await;
assert_eq!(code, 200);
let version = response.as_object().unwrap();
@ -18,7 +18,7 @@ async fn get_settings_unexisting_index() {
#[actix_rt::test]
async fn test_healthyness() {
let server = Server::new().await;
let server = Server::new_shared();
let (response, status_code) = server.service.get("/health").await;
assert_eq!(status_code, 200);
@ -55,7 +55,7 @@ async fn stats() {
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202, "{}", response);
assert_eq!(code, 202, "{response}");
assert_eq!(response["taskUid"], 1);
index.wait_task(response.uid()).await.succeeded();
@ -78,8 +78,8 @@ async fn stats() {
#[actix_rt::test]
async fn add_remove_embeddings() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -216,8 +216,8 @@ async fn add_remove_embeddings() {
#[actix_rt::test]
async fn add_remove_embedded_documents() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -293,8 +293,8 @@ async fn add_remove_embedded_documents() {
#[actix_rt::test]
async fn update_embedder_settings() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
// 2 embedded documents for 3 embeddings in total
// but no embedders are added in the settings yet so we expect 0 embedded documents for 0 embeddings in total

View File

@ -1,8 +1,7 @@
mod errors;
mod webhook;
use meili_snap::insta::assert_json_snapshot;
use meili_snap::snapshot;
use meili_snap::{json_string, snapshot};
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
@ -11,14 +10,12 @@ use crate::json;
#[actix_rt::test]
async fn error_get_unexisting_task_status() {
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get_task(1).await;
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index.get_task(u32::MAX as u64).await;
let expected_response = json!({
"message": "Task `1` not found.",
"message": "Task `4294967295` not found.",
"code": "task_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#task_not_found"
@ -30,8 +27,8 @@ async fn error_get_unexisting_task_status() {
#[actix_rt::test]
async fn get_task_status() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (create_task, _status_code) = index.create(None).await;
let (add_task, _status_code) = index
.add_documents(
@ -42,7 +39,7 @@ async fn get_task_status() {
None,
)
.await;
index.wait_task(create_task.uid()).await.succeeded();
server.wait_task(create_task.uid()).await.succeeded();
let (_response, code) = index.get_task(add_task.uid()).await;
assert_eq!(code, 200);
// TODO check response format, as per #48
@ -50,10 +47,11 @@ async fn get_task_status() {
#[actix_rt::test]
async fn list_tasks() {
// Do not use a shared server because we want to assert stuff against the global list of tasks
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
index
.add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
.await;
@ -64,6 +62,7 @@ async fn list_tasks() {
#[actix_rt::test]
async fn list_tasks_pagination_and_reverse() {
// do not use a shared server here, as we want to assert tasks ids and we need them to be stable
let server = Server::new().await;
// First of all we want to create a lot of tasks very quickly. The fastest way is to delete a lot of unexisting indexes
let mut last_task = None;
@ -71,7 +70,7 @@ async fn list_tasks_pagination_and_reverse() {
let index = server.index(format!("test-{i}"));
last_task = Some(index.create(None).await.0.uid());
}
server.wait_task(last_task.unwrap()).await;
server.wait_task(last_task.unwrap()).await.succeeded();
let (response, code) = server.tasks_filter("limit=3").await;
assert_eq!(code, 200);
@ -103,13 +102,14 @@ async fn list_tasks_pagination_and_reverse() {
#[actix_rt::test]
async fn list_tasks_with_star_filters() {
let server = Server::new().await;
// Do not use a unique index here, as we want to test the `indexUids=*` filter.
let index = server.index("test");
let (task, _code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
index
.add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
.await;
let (response, code) = index.service.get("/tasks?indexUids=test").await;
let (response, code) = index.service.get(format!("/tasks?indexUids={}", index.uid)).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 2);
@ -127,93 +127,102 @@ async fn list_tasks_with_star_filters() {
let (response, code) =
index.service.get("/tasks?types=*,documentAdditionOrUpdate&statuses=*").await;
assert_eq!(code, 200, "{:?}", response);
assert_eq!(code, 200, "{response:?}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
let (response, code) = index
.service
.get("/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test")
.get(format!(
"/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids={}",
index.uid
))
.await;
assert_eq!(code, 200, "{:?}", response);
assert_eq!(code, 200, "{response:?}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
let (response, code) = index
.service
.get("/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test,*")
.await;
assert_eq!(code, 200, "{:?}", response);
assert_eq!(code, 200, "{response:?}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
async fn list_tasks_status_filtered() {
// Do not use a shared server because we want to assert stuff against the global list of tasks
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.failed();
server.wait_task(task.uid()).await.failed();
let (response, code) = index.filtered_tasks(&[], &["succeeded"], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 1);
let (response, code) = index.filtered_tasks(&[], &["succeeded"], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 1);
let (response, code) = index.filtered_tasks(&[], &["succeeded", "failed"], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
async fn list_tasks_type_filtered() {
// Do not use a shared server because we want to assert stuff against the global list of tasks
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
index
.add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
.await;
let (response, code) = index.filtered_tasks(&["indexCreation"], &[], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 1);
let (response, code) =
index.filtered_tasks(&["indexCreation", "documentAdditionOrUpdate"], &[], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
async fn list_tasks_invalid_canceled_by_filter() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
index
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = index
.add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
.await;
server.wait_task(task.uid()).await.succeeded();
let (response, code) = index.filtered_tasks(&[], &[], &["0"]).await;
assert_eq!(code, 200, "{}", response);
let (response, code) =
index.filtered_tasks(&[], &[], &[format!("{}", task.uid()).as_str()]).await;
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 0);
}
#[actix_rt::test]
async fn list_tasks_status_and_type_filtered() {
// Do not use a shared server because we want to assert stuff against the global list of tasks
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
index
.add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
.await;
let (response, code) = index.filtered_tasks(&["indexCreation"], &["failed"], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 0);
let (response, code) = index
@ -223,12 +232,12 @@ async fn list_tasks_status_and_type_filtered() {
&[],
)
.await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
}
macro_rules! assert_valid_summarized_task {
($response:expr, $task_type:literal, $index:literal) => {{
($response:expr, $task_type:literal, $index:tt) => {{
assert_eq!($response.as_object().unwrap().len(), 5);
assert!($response["taskUid"].as_u64().is_some());
assert_eq!($response["indexUid"], $index);
@ -242,49 +251,49 @@ macro_rules! assert_valid_summarized_task {
#[actix_web::test]
async fn test_summarized_task_view() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let index_uid = index.uid.clone();
let (response, _) = index.create(None).await;
assert_valid_summarized_task!(response, "indexCreation", "test");
assert_valid_summarized_task!(response, "indexCreation", index_uid);
let (response, _) = index.update(None).await;
assert_valid_summarized_task!(response, "indexUpdate", "test");
assert_valid_summarized_task!(response, "indexUpdate", index_uid);
let (response, _) = index.update_settings(json!({})).await;
assert_valid_summarized_task!(response, "settingsUpdate", "test");
assert_valid_summarized_task!(response, "settingsUpdate", index_uid);
let (response, _) = index.update_documents(json!([{"id": 1}]), None).await;
assert_valid_summarized_task!(response, "documentAdditionOrUpdate", "test");
assert_valid_summarized_task!(response, "documentAdditionOrUpdate", index_uid);
let (response, _) = index.add_documents(json!([{"id": 1}]), None).await;
assert_valid_summarized_task!(response, "documentAdditionOrUpdate", "test");
assert_valid_summarized_task!(response, "documentAdditionOrUpdate", index_uid);
let (response, _) = index.delete_document(1).await;
assert_valid_summarized_task!(response, "documentDeletion", "test");
assert_valid_summarized_task!(response, "documentDeletion", index_uid);
let (response, _) = index.clear_all_documents().await;
assert_valid_summarized_task!(response, "documentDeletion", "test");
assert_valid_summarized_task!(response, "documentDeletion", index_uid);
let (response, _) = index.delete().await;
assert_valid_summarized_task!(response, "indexDeletion", "test");
assert_valid_summarized_task!(response, "indexDeletion", index_uid);
}
#[actix_web::test]
async fn test_summarized_document_addition_or_update() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) =
index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(0).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -302,15 +311,14 @@ async fn test_summarized_document_addition_or_update() {
let (task, _status_code) =
index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(1).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
snapshot!(task,
@r###"
{
"uid": 1,
"batchUid": 1,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -329,18 +337,22 @@ async fn test_summarized_document_addition_or_update() {
#[actix_web::test]
async fn test_summarized_delete_documents_by_batch() {
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.delete_batch(vec![1, 2, 3]).await;
index.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(0).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
let server = Server::new_shared();
let index = server.unique_index();
let non_existing_task_id1 = u32::MAX as u64;
let non_existing_task_id2 = non_existing_task_id1 - 1;
let non_existing_task_id3 = non_existing_task_id1 - 2;
let (task, _status_code) = index
.delete_batch(vec![non_existing_task_id1, non_existing_task_id2, non_existing_task_id3])
.await;
server.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
@ -350,7 +362,7 @@ async fn test_summarized_delete_documents_by_batch() {
"originalFilter": null
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -364,15 +376,14 @@ async fn test_summarized_delete_documents_by_batch() {
index.create(None).await;
let (del_task, _status_code) = index.delete_batch(vec![42]).await;
index.wait_task(del_task.uid()).await.succeeded();
server.wait_task(del_task.uid()).await.succeeded();
let (task, _) = index.get_task(del_task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@ -392,20 +403,19 @@ async fn test_summarized_delete_documents_by_batch() {
#[actix_web::test]
async fn test_summarized_delete_documents_by_filter() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) =
index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
index.wait_task(task.uid()).await.failed();
server.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
@ -415,7 +425,7 @@ async fn test_summarized_delete_documents_by_filter() {
"originalFilter": "\"doggo = bernese\""
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -430,15 +440,14 @@ async fn test_summarized_delete_documents_by_filter() {
index.create(None).await;
let (task, _status_code) =
index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
index.wait_task(task.uid()).await.failed();
server.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
@ -448,7 +457,7 @@ async fn test_summarized_delete_documents_by_filter() {
"originalFilter": "\"doggo = bernese\""
},
"error": {
"message": "Index `test`: Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
"message": "Index `[uuid]`: Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
"code": "invalid_document_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
@ -463,15 +472,14 @@ async fn test_summarized_delete_documents_by_filter() {
index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await;
let (task, _status_code) =
index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 4,
"batchUid": 4,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@ -491,18 +499,17 @@ async fn test_summarized_delete_documents_by_filter() {
#[actix_web::test]
async fn test_summarized_delete_document_by_id() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.delete_document(1).await;
index.wait_task(task.uid()).await.failed();
server.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
@ -512,7 +519,7 @@ async fn test_summarized_delete_document_by_id() {
"originalFilter": null
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -526,15 +533,14 @@ async fn test_summarized_delete_document_by_id() {
index.create(None).await;
let (task, _status_code) = index.delete_document(42).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@ -554,12 +560,12 @@ async fn test_summarized_delete_document_by_id() {
#[actix_web::test]
async fn test_summarized_settings_update() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
// here we should find my payload even in the failed task.
let (response, code) = index.update_settings(json!({ "rankingRules": ["custom"] })).await;
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Invalid value at `.rankingRules[0]`: `custom` ranking rule is invalid. Valid ranking rules are words, typo, sort, proximity, attribute, exactness and custom ranking rules.",
"code": "invalid_settings_ranking_rules",
@ -569,15 +575,14 @@ async fn test_summarized_settings_update() {
"###);
let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
@ -605,18 +610,17 @@ async fn test_summarized_settings_update() {
#[actix_web::test]
async fn test_summarized_index_creation() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
@ -632,15 +636,14 @@ async fn test_summarized_index_creation() {
"###);
let (task, _status_code) = index.create(Some("doggos")).await;
index.wait_task(task.uid()).await.failed();
server.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 1,
"batchUid": 1,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexCreation",
"canceledBy": null,
@ -648,7 +651,7 @@ async fn test_summarized_index_creation() {
"primaryKey": "doggos"
},
"error": {
"message": "Index `test` already exists.",
"message": "Index `[uuid]` already exists.",
"code": "index_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_already_exists"
@ -663,16 +666,16 @@ async fn test_summarized_index_creation() {
#[actix_web::test]
async fn test_summarized_index_deletion() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (ret, _code) = index.delete().await;
let task = index.wait_task(ret.uid()).await;
let task = server.wait_task(ret.uid()).await;
snapshot!(task,
@r###"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexDeletion",
"canceledBy": null,
@ -680,7 +683,7 @@ async fn test_summarized_index_deletion() {
"deletedDocuments": 0
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -697,13 +700,13 @@ async fn test_summarized_index_deletion() {
// both tasks may get autobatched and the deleted documents count will be wrong.
let (ret, _code) =
index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await;
let task = index.wait_task(ret.uid()).await;
let task = server.wait_task(ret.uid()).await;
snapshot!(task,
@r###"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -720,13 +723,13 @@ async fn test_summarized_index_deletion() {
"###);
let (ret, _code) = index.delete().await;
let task = index.wait_task(ret.uid()).await;
let task = server.wait_task(ret.uid()).await;
snapshot!(task,
@r###"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
@ -743,13 +746,13 @@ async fn test_summarized_index_deletion() {
// What happens when you delete an index that doesn't exist.
let (ret, _code) = index.delete().await;
let task = index.wait_task(ret.uid()).await;
let task = server.wait_task(ret.uid()).await;
snapshot!(task,
@r###"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexDeletion",
"canceledBy": null,
@ -757,7 +760,7 @@ async fn test_summarized_index_deletion() {
"deletedDocuments": 0
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -772,19 +775,18 @@ async fn test_summarized_index_deletion() {
#[actix_web::test]
async fn test_summarized_index_update() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
// If the index doesn't exist yet, we should get errors with or without the primary key.
let (task, _status_code) = index.update(None).await;
index.wait_task(task.uid()).await.failed();
server.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexUpdate",
"canceledBy": null,
@ -792,7 +794,7 @@ async fn test_summarized_index_update() {
"primaryKey": null
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -805,15 +807,14 @@ async fn test_summarized_index_update() {
"###);
let (task, _status_code) = index.update(Some("bones")).await;
index.wait_task(task.uid()).await.failed();
server.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 1,
"batchUid": 1,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexUpdate",
"canceledBy": null,
@ -821,7 +822,7 @@ async fn test_summarized_index_update() {
"primaryKey": "bones"
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -837,15 +838,14 @@ async fn test_summarized_index_update() {
index.create(None).await;
let (task, _status_code) = index.update(None).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 3,
"batchUid": 3,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "indexUpdate",
"canceledBy": null,
@ -861,15 +861,14 @@ async fn test_summarized_index_update() {
"###);
let (task, _status_code) = index.update(Some("bones")).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 4,
"batchUid": 4,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "indexUpdate",
"canceledBy": null,
@ -887,7 +886,7 @@ async fn test_summarized_index_update() {
#[actix_web::test]
async fn test_summarized_index_swap() {
let server = Server::new().await;
let server = Server::new_shared();
let (task, _status_code) = server
.index_swap(json!([
{ "indexes": ["doggos", "cattos"] }
@ -895,12 +894,11 @@ async fn test_summarized_index_swap() {
.await;
server.wait_task(task.uid()).await.failed();
let (task, _) = server.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "failed",
"type": "indexSwap",
@ -928,23 +926,25 @@ async fn test_summarized_index_swap() {
}
"###);
let (task, _code) = server.index("doggos").create(None).await;
let doggos_index = server.unique_index();
let (task, _code) = doggos_index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = server.index("cattos").create(None).await;
let cattos_index = server.unique_index();
let (task, _code) = cattos_index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = server
.index_swap(json!([
{ "indexes": ["doggos", "cattos"] }
{ "indexes": [doggos_index.uid, cattos_index.uid] }
]))
.await;
server.wait_task(task.uid()).await.succeeded();
let (task, _) = server.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(json_string!(task,
{ ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".**.indexes[0]" => "doggos", ".**.indexes[1]" => "cattos", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r###"
{
"uid": 3,
"batchUid": 3,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "succeeded",
"type": "indexSwap",
@ -970,20 +970,21 @@ async fn test_summarized_index_swap() {
#[actix_web::test]
async fn test_summarized_task_cancelation() {
let server = Server::new().await;
let index = server.index("doggos");
let server = Server::new_shared();
let index = server.unique_index();
// to avoid being flaky we're only going to cancel an already finished task :(
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = server.cancel_tasks("uids=0").await;
index.wait_task(task.uid()).await.succeeded();
let task_uid = task.uid();
server.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = server.cancel_tasks(format!("uids={task_uid}").as_str()).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(json_string!(task,
{ ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".**.originalFilter" => "[of]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r###"
{
"uid": 1,
"batchUid": 1,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
@ -991,7 +992,7 @@ async fn test_summarized_task_cancelation() {
"details": {
"matchedTasks": 1,
"canceledTasks": 0,
"originalFilter": "?uids=0"
"originalFilter": "[of]"
},
"error": null,
"duration": "[duration]",
@ -1004,20 +1005,19 @@ async fn test_summarized_task_cancelation() {
#[actix_web::test]
async fn test_summarized_task_deletion() {
let server = Server::new().await;
let index = server.index("doggos");
let server = Server::new_shared();
let index = server.unique_index();
// to avoid being flaky we're only going to delete an already finished task :(
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = server.delete_tasks("uids=0").await;
index.wait_task(task.uid()).await.succeeded();
server.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 1,
"batchUid": 1,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "succeeded",
"type": "taskDeletion",
@ -1038,22 +1038,22 @@ async fn test_summarized_task_deletion() {
#[actix_web::test]
async fn test_summarized_dump_creation() {
// Do not use a shared server because it takes too long to create a dump
let server = Server::new().await;
let (task, _status_code) = server.create_dump().await;
server.wait_task(task.uid()).await;
let (task, _) = server.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".details.dumpUid" => "[dumpUid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "succeeded",
"type": "dumpCreation",
"canceledBy": null,
"details": {
"dumpUid": "[dumpUid]"
"dumpUid": "[dump_uid]"
},
"error": null,
"duration": "[duration]",

View File

@ -6,8 +6,8 @@ use crate::vector::generate_default_user_provided_documents;
#[actix_rt::test]
async fn retrieve_binary_quantize_status_in_the_settings() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -65,8 +65,8 @@ async fn retrieve_binary_quantize_status_in_the_settings() {
#[actix_rt::test]
async fn binary_quantize_before_sending_documents() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -139,8 +139,8 @@ async fn binary_quantize_before_sending_documents() {
#[actix_rt::test]
async fn binary_quantize_after_sending_documents() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -226,8 +226,8 @@ async fn binary_quantize_after_sending_documents() {
#[actix_rt::test]
async fn try_to_disable_binary_quantization() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -256,11 +256,11 @@ async fn try_to_disable_binary_quantization() {
.await;
snapshot!(code, @"202 Accepted");
let ret = server.wait_task(response.uid()).await;
snapshot!(ret, @r#"
snapshot!(json_string!(ret, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".finishedAt" => "[date]", ".startedAt" => "[date]" }), @r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "doggo",
"indexUid": "[uuid]",
"status": "failed",
"type": "settingsUpdate",
"canceledBy": null,
@ -274,7 +274,7 @@ async fn try_to_disable_binary_quantization() {
}
},
"error": {
"message": "Index `doggo`: `.embedders.manual.binaryQuantized`: Cannot disable the binary quantization.\n - Note: Binary quantization is a lossy operation that cannot be reverted.\n - Hint: Add a new embedder that is non-quantized and regenerate the vectors.",
"message": "Index `[uuid]`: `.embedders.manual.binaryQuantized`: Cannot disable the binary quantization.\n - Note: Binary quantization is a lossy operation that cannot be reverted.\n - Hint: Add a new embedder that is non-quantized and regenerate the vectors.",
"code": "invalid_settings_embedders",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_settings_embedders"

View File

@ -50,7 +50,7 @@ impl AttributePatterns {
///
/// * `pattern` - The pattern to match against.
/// * `str` - The string to match against the pattern.
fn match_pattern(pattern: &str, str: &str) -> PatternMatch {
pub fn match_pattern(pattern: &str, str: &str) -> PatternMatch {
// If the pattern is a wildcard, return Match
if pattern == "*" {
return PatternMatch::Match;
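
match_pattern becomes pub so the search entry point, further down in this changeset, can expand wildcard entries of attributesToSearchOn against the list of searchable fields. The intended semantics, sketched as assertions; only the Match variant is visible in this hunk, so the NoMatch name is an assumption:

assert_eq!(match_pattern("*", "title_en"), PatternMatch::Match);       // universal wildcard
assert_eq!(match_pattern("title_*", "title_en"), PatternMatch::Match); // prefix pattern
// A field name that matches no pattern is expected to yield a non-Match
// variant (assumed here to be PatternMatch::NoMatch).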

View File

@ -1,11 +1,13 @@
use std::cmp::Ordering;
use heed::RoTxn;
use itertools::Itertools;
use roaring::RoaringBitmap;
use crate::score_details::{ScoreDetails, ScoreValue, ScoringStrategy};
use crate::search::new::{distinct_fid, distinct_single_docid};
use crate::search::SemanticSearch;
use crate::{MatchingWords, Result, Search, SearchResult};
use crate::{Index, MatchingWords, Result, Search, SearchResult};
struct ScoreWithRatioResult {
matching_words: MatchingWords,
@ -91,7 +93,10 @@ impl ScoreWithRatioResult {
keyword_results: Self,
from: usize,
length: usize,
) -> (SearchResult, u32) {
distinct: Option<&str>,
index: &Index,
rtxn: &RoTxn<'_>,
) -> Result<(SearchResult, u32)> {
#[derive(Clone, Copy)]
enum ResultSource {
Semantic,
@ -106,8 +111,9 @@ impl ScoreWithRatioResult {
vector_results.document_scores.len() + keyword_results.document_scores.len(),
);
let mut documents_seen = RoaringBitmap::new();
for ((docid, (main_score, _sub_score)), source) in vector_results
let distinct_fid = distinct_fid(distinct, index, rtxn)?;
let mut excluded_documents = RoaringBitmap::new();
for res in vector_results
.document_scores
.into_iter()
.zip(std::iter::repeat(ResultSource::Semantic))
@ -121,13 +127,33 @@ impl ScoreWithRatioResult {
compare_scores(left, right).is_ge()
},
)
// remove documents we already saw
.filter(|((docid, _), _)| documents_seen.insert(*docid))
// remove documents we already saw and apply distinct rule
.filter_map(|item @ ((docid, _), _)| {
if !excluded_documents.insert(docid) {
// the document was already added, or is indistinct from an already-added document.
return None;
}
if let Some(distinct_fid) = distinct_fid {
if let Err(error) = distinct_single_docid(
index,
rtxn,
distinct_fid,
docid,
&mut excluded_documents,
) {
return Some(Err(error));
}
}
Some(Ok(item))
})
// start skipping **after** the filter
.skip(from)
// take **after** skipping
.take(length)
{
let ((docid, (main_score, _sub_score)), source) = res?;
if let ResultSource::Semantic = source {
semantic_hit_count += 1;
}
@ -136,10 +162,24 @@ impl ScoreWithRatioResult {
document_scores.push(main_score);
}
(
// compute the set of candidates from both sets
let candidates = vector_results.candidates | keyword_results.candidates;
let must_remove_redundant_candidates = distinct_fid.is_some();
let candidates = if must_remove_redundant_candidates {
// patch up the candidates to remove the indistinct documents, then add back the actual hits
let mut candidates = candidates - excluded_documents;
for docid in &documents_ids {
candidates.insert(*docid);
}
candidates
} else {
candidates
};
Ok((
SearchResult {
matching_words: keyword_results.matching_words,
candidates: vector_results.candidates | keyword_results.candidates,
candidates,
documents_ids,
document_scores,
degraded: vector_results.degraded | keyword_results.degraded,
@ -147,7 +187,7 @@ impl ScoreWithRatioResult {
| keyword_results.used_negative_operator,
},
semantic_hit_count,
)
))
}
}
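
The ordering inside merge is load-bearing: the distinct filter must run before skip(from) and take(length), otherwise pagination would count documents that are later discarded as indistinct and pages would come up short. A tiny iterator illustration with hypothetical docids, where duplicates stand in for indistinct documents:

use std::collections::HashSet;

let hits = vec![1, 1, 2, 2, 3, 3]; // hypothetical docids; duplicates ~ indistinct
let mut seen = HashSet::new();

// Deduplicate first, then paginate: page from=1, length=2 over the distinct docs.
let page: Vec<_> =
    hits.into_iter().filter(|d| seen.insert(*d)).skip(1).take(2).collect();
assert_eq!(page, vec![2, 3]); // skipping before filtering would instead yield short pages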
@ -226,8 +266,15 @@ impl Search<'_> {
let keyword_results = ScoreWithRatioResult::new(keyword_results, 1.0 - semantic_ratio);
let vector_results = ScoreWithRatioResult::new(vector_results, semantic_ratio);
let (merge_results, semantic_hit_count) =
ScoreWithRatioResult::merge(vector_results, keyword_results, self.offset, self.limit);
let (merge_results, semantic_hit_count) = ScoreWithRatioResult::merge(
vector_results,
keyword_results,
self.offset,
self.limit,
search.distinct.as_deref(),
search.index,
search.rtxn,
)?;
assert!(merge_results.documents_ids.len() <= self.limit);
Ok((merge_results, Some(semantic_hit_count)))
}

View File

@ -4,7 +4,9 @@ use super::logger::SearchLogger;
use super::ranking_rules::{BoxRankingRule, RankingRuleQueryTrait};
use super::SearchContext;
use crate::score_details::{ScoreDetails, ScoringStrategy};
use crate::search::new::distinct::{apply_distinct_rule, distinct_single_docid, DistinctOutput};
use crate::search::new::distinct::{
apply_distinct_rule, distinct_fid, distinct_single_docid, DistinctOutput,
};
use crate::{Result, TimeBudget};
pub struct BucketSortOutput {
@ -35,16 +37,7 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
logger.ranking_rules(&ranking_rules);
logger.initial_universe(universe);
let distinct_field = match distinct {
Some(distinct) => Some(distinct),
None => ctx.index.distinct_field(ctx.txn)?,
};
let distinct_fid = if let Some(field) = distinct_field {
ctx.index.fields_ids_map(ctx.txn)?.id(field)
} else {
None
};
let distinct_fid = distinct_fid(distinct, ctx.index, ctx.txn)?;
if universe.len() < from as u64 {
return Ok(BucketSortOutput {

View File

@ -9,7 +9,7 @@ use crate::heed_codec::facet::{
FacetGroupKey, FacetGroupKeyCodec, FacetGroupValueCodec, FieldDocIdFacetCodec,
};
use crate::heed_codec::BytesRefCodec;
use crate::{Index, Result, SearchContext};
use crate::{FieldId, Index, Result, SearchContext};
pub struct DistinctOutput {
pub remaining: RoaringBitmap,
@ -121,3 +121,18 @@ pub fn facet_string_values<'a>(
fn facet_values_prefix_key(distinct: u16, id: u32) -> [u8; FID_SIZE + DOCID_SIZE] {
concat_arrays::concat_arrays!(distinct.to_be_bytes(), id.to_be_bytes())
}
pub fn distinct_fid(
query_distinct_field: Option<&str>,
index: &Index,
rtxn: &RoTxn<'_>,
) -> Result<Option<FieldId>> {
let distinct_field = match query_distinct_field {
Some(distinct) => Some(distinct),
None => index.distinct_field(rtxn)?,
};
let distinct_fid =
if let Some(field) = distinct_field { index.fields_ids_map(rtxn)?.id(field) } else { None };
Ok(distinct_fid)
}
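
This new helper centralizes the resolution that bucket_sort previously inlined: a per-query distinct attribute overrides the index-level setting, and whichever wins is mapped to its internal FieldId. A hedged usage sketch, where query_distinct is a hypothetical Option<&str> taken from the search request:

let distinct_fid = distinct_fid(query_distinct, index, rtxn)?;
if let Some(fid) = distinct_fid {
    // exclude documents sharing this field's value via distinct_single_docid(...)
}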

View File

@ -28,6 +28,7 @@ use std::time::Duration;
use bucket_sort::{bucket_sort, BucketSortOutput};
use charabia::{Language, TokenizerBuilder};
use db_cache::DatabaseCache;
pub use distinct::{distinct_fid, distinct_single_docid};
use exact_attribute::ExactAttribute;
use graph_based_ranking_rule::{Exactness, Fid, Position, Proximity, Typo};
use heed::RoTxn;
@ -51,6 +52,7 @@ pub use self::geo_sort::{Parameter as GeoSortParameter, Strategy as GeoSortStrat
use self::graph_based_ranking_rule::Words;
use self::interner::Interned;
use self::vector_sort::VectorSort;
use crate::attribute_patterns::{match_pattern, PatternMatch};
use crate::constants::RESERVED_GEO_FIELD_NAME;
use crate::index::PrefixSearch;
use crate::localized_attributes_rules::LocalizedFieldIds;
@ -119,17 +121,37 @@ impl<'ctx> SearchContext<'ctx> {
let searchable_fields_weights = self.index.searchable_fields_and_weights(self.txn)?;
let exact_attributes_ids = self.index.exact_attributes_ids(self.txn)?;
let mut wildcard = false;
let mut universal_wildcard = false;
let mut restricted_fids = RestrictedFids::default();
for field_name in attributes_to_search_on {
if field_name == "*" {
wildcard = true;
universal_wildcard = true;
// we cannot early exit as we want to return an error in case of unknown fields
continue;
}
let searchable_weight =
searchable_fields_weights.iter().find(|(name, _, _)| name == field_name);
// The field is not searchable but may contain a wildcard pattern
if searchable_weight.is_none() && field_name.contains("*") {
let matching_searchable_weights: Vec<_> = searchable_fields_weights
.iter()
.filter(|(name, _, _)| match_pattern(field_name, name) == PatternMatch::Match)
.collect();
if !matching_searchable_weights.is_empty() {
for (_name, fid, weight) in matching_searchable_weights {
if exact_attributes_ids.contains(fid) {
restricted_fids.exact.push((*fid, *weight));
} else {
restricted_fids.tolerant.push((*fid, *weight));
}
}
continue;
}
}
let (fid, weight) = match searchable_weight {
// The Field id exist and the field is searchable
Some((_name, fid, weight)) => (*fid, *weight),
@ -159,7 +181,7 @@ impl<'ctx> SearchContext<'ctx> {
};
}
if wildcard {
if universal_wildcard {
self.restricted_fids = None;
} else {
self.restricted_fids = Some(restricted_fids);
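
Net effect of this hunk: attributesToSearchOn no longer accepts only exact field names or the lone "*"; it also expands patterns such as title_* to every searchable field they match, preserving each matched field's exact or tolerant classification. A hedged request sketch against the test harness (field names hypothetical, search_post as used elsewhere in the test suite):

let (response, _code) = index
    .search_post(json!({
        "q": "dog",
        "attributesToSearchOn": ["title_*"]
    }))
    .await;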

View File

@ -202,11 +202,11 @@ pub fn number_of_typos_allowed<'ctx>(
Ok(Box::new(move |word: &str| {
if !authorize_typos
|| word.len() < min_len_one_typo as usize
|| word.chars().count() < min_len_one_typo as usize
|| exact_words.as_ref().is_some_and(|fst| fst.contains(word))
{
0
} else if word.len() < min_len_two_typos as usize {
} else if word.chars().count() < min_len_two_typos as usize {
1
} else {
2
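
The underlying bug: str::len counts UTF-8 bytes, so a five-letter Cyrillic word measured ten "letters" long and was granted two typos where an equally long ASCII word got one; counting chars() restores parity. Illustrated against the default thresholds (oneTypo = 5, twoTypos = 9):

let word = "собак";                  // 5 chars, 10 bytes
assert_eq!(word.len(), 10);          // byte count: would wrongly grant 2 typos
assert_eq!(word.chars().count(), 5); // char count: correctly grants 1 typo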
@ -380,4 +380,62 @@ mod tests {
Ok(())
}
#[test]
fn test_unicode_typo_tolerance_fixed() -> Result<()> {
let temp_index = temp_index_with_documents();
let rtxn = temp_index.read_txn()?;
let ctx = SearchContext::new(&temp_index, &rtxn)?;
let nbr_typos = number_of_typos_allowed(&ctx)?;
// ASCII word "doggy" (5 chars, 5 bytes)
let ascii_word = "doggy";
let ascii_typos = nbr_typos(ascii_word);
// Cyrillic word "собак" (5 chars, 10 bytes)
let cyrillic_word = "собак";
let cyrillic_typos = nbr_typos(cyrillic_word);
// Both words have 5 characters, so they should have the same typo tolerance
assert_eq!(
ascii_typos, cyrillic_typos,
"Words with same character count should get same typo tolerance"
);
// With default settings (oneTypo=5, twoTypos=9), 5-char words should get 1 typo
assert_eq!(ascii_typos, 1, "5-character word should get 1 typo tolerance");
assert_eq!(cyrillic_typos, 1, "5-character word should get 1 typo tolerance");
Ok(())
}
#[test]
fn test_various_unicode_scripts() -> Result<()> {
let temp_index = temp_index_with_documents();
let rtxn = temp_index.read_txn()?;
let ctx = SearchContext::new(&temp_index, &rtxn)?;
let nbr_typos = number_of_typos_allowed(&ctx)?;
// Let's use 5-character words for consistent testing
let five_char_words = vec![
("doggy", "ASCII"), // 5 chars, 5 bytes
("café!", "Accented"), // 5 chars, 7 bytes
("собак", "Cyrillic"), // 5 chars, 10 bytes
];
let expected_typos = 1; // With default settings, 5-char words get 1 typo
for (word, script) in five_char_words {
let typos = nbr_typos(word);
assert_eq!(
typos, expected_typos,
"{} word '{}' should get {} typo(s)",
script, word, expected_typos
);
}
Ok(())
}
}

View File

@ -72,7 +72,7 @@ fn test_2gram_simple() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@ -103,7 +103,7 @@ fn test_3gram_simple() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@@ -153,7 +153,7 @@ fn test_no_disable_ngrams() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@@ -179,7 +179,7 @@ fn test_2gram_prefix() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@@ -208,7 +208,7 @@ fn test_3gram_prefix() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@@ -260,7 +260,7 @@ fn test_disable_split_words() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();

View File

@@ -151,7 +151,7 @@ fn test_no_typo() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();

View File

@@ -19,10 +19,7 @@ use crate::update::{
};
use crate::vector::settings::{EmbedderSource, EmbeddingSettings};
use crate::vector::EmbeddingConfigs;
use crate::{
db_snap, obkv_to_json, Filter, FilterableAttributesRule, Index, Search, SearchResult,
ThreadPoolNoAbortBuilder,
};
use crate::{db_snap, obkv_to_json, Filter, FilterableAttributesRule, Index, Search, SearchResult};
pub(crate) struct TempIndex {
pub inner: Index,
@@ -62,15 +59,8 @@ impl TempIndex {
wtxn: &mut RwTxn<'t>,
documents: Mmap,
) -> Result<(), crate::error::Error> {
let local_pool;
let indexer_config = &self.indexer_config;
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new().build().unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
let rtxn = self.inner.read_txn()?;
let db_fields_ids_map = self.inner.fields_ids_map(&rtxn)?;
@@ -153,15 +143,8 @@ impl TempIndex {
wtxn: &mut RwTxn<'t>,
external_document_ids: Vec<String>,
) -> Result<(), crate::error::Error> {
let local_pool;
let indexer_config = &self.indexer_config;
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new().build().unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
let rtxn = self.inner.read_txn()?;
let db_fields_ids_map = self.inner.fields_ids_map(&rtxn)?;
@@ -231,15 +214,8 @@ fn aborting_indexation() {
let mut wtxn = index.inner.write_txn().unwrap();
let should_abort = AtomicBool::new(false);
let local_pool;
let indexer_config = &index.indexer_config;
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new().build().unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
let rtxn = index.inner.read_txn().unwrap();
let db_fields_ids_map = index.inner.fields_ids_map(&rtxn).unwrap();

View File

@@ -54,6 +54,10 @@ impl ThreadPoolNoAbortBuilder {
ThreadPoolNoAbortBuilder::default()
}
pub fn new_for_indexing() -> ThreadPoolNoAbortBuilder {
ThreadPoolNoAbortBuilder::default().thread_name(|index| format!("indexing-thread:{index}"))
}
pub fn thread_name<F>(mut self, closure: F) -> Self
where
F: FnMut(usize) -> String + 'static,
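A usage sketch for the new helper; `num_threads` and `build` are the builder methods already visible elsewhere in this diff:

// threads are named "indexing-thread:0", "indexing-thread:1", ...
let pool = ThreadPoolNoAbortBuilder::new_for_indexing()
    .num_threads(4)
    .build()
    .expect("failed to build indexing pool");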

View File

@@ -33,7 +33,6 @@ use crate::documents::{obkv_to_object, DocumentsBatchReader};
use crate::error::{Error, InternalError};
use crate::index::{PrefixSearch, PrefixSettings};
use crate::progress::Progress;
use crate::thread_pool_no_abort::ThreadPoolNoAbortBuilder;
pub use crate::update::index_documents::helpers::CursorClonableMmap;
use crate::update::{
IndexerConfig, UpdateIndexingStep, WordPrefixDocids, WordPrefixIntegerDocids, WordsPrefixesFst,
@@ -228,24 +227,7 @@ where
let possible_embedding_mistakes =
crate::vector::error::PossibleEmbeddingMistakes::new(&field_distribution);
let backup_pool;
let pool = match self.indexer_config.thread_pool {
Some(ref pool) => pool,
None => {
// We initialize a backup pool with the default
// settings if none have already been set.
#[allow(unused_mut)]
let mut pool_builder = ThreadPoolNoAbortBuilder::new();
#[cfg(test)]
{
pool_builder = pool_builder.num_threads(1);
}
backup_pool = pool_builder.build()?;
&backup_pool
}
};
let pool = &self.indexer_config.thread_pool;
// create LMDB writer channel
let (lmdb_writer_sx, lmdb_writer_rx): (

View File

@@ -1,7 +1,7 @@
use grenad::CompressionType;
use super::GrenadParameters;
use crate::thread_pool_no_abort::ThreadPoolNoAbort;
use crate::{thread_pool_no_abort::ThreadPoolNoAbort, ThreadPoolNoAbortBuilder};
#[derive(Debug)]
pub struct IndexerConfig {
@@ -9,9 +9,10 @@ pub struct IndexerConfig {
pub max_nb_chunks: Option<usize>,
pub documents_chunk_size: Option<usize>,
pub max_memory: Option<usize>,
pub max_threads: Option<usize>,
pub chunk_compression_type: CompressionType,
pub chunk_compression_level: Option<u32>,
pub thread_pool: Option<ThreadPoolNoAbort>,
pub thread_pool: ThreadPoolNoAbort,
pub max_positions_per_attributes: Option<u32>,
pub skip_index_budget: bool,
}
@@ -27,16 +28,39 @@ impl IndexerConfig {
}
}
/// By default, use only 1 thread for indexing in tests
#[cfg(test)]
pub fn default_thread_pool_and_threads() -> (ThreadPoolNoAbort, Option<usize>) {
let pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.num_threads(1)
.build()
.expect("failed to build default rayon thread pool");
(pool, Some(1))
}
#[cfg(not(test))]
pub fn default_thread_pool_and_threads() -> (ThreadPoolNoAbort, Option<usize>) {
let pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.build()
.expect("failed to build default rayon thread pool");
(pool, None)
}
impl Default for IndexerConfig {
fn default() -> Self {
let (thread_pool, max_threads) = default_thread_pool_and_threads();
Self {
max_threads,
thread_pool,
log_every_n: None,
max_nb_chunks: None,
documents_chunk_size: None,
max_memory: None,
chunk_compression_type: CompressionType::None,
chunk_compression_level: None,
thread_pool: None,
max_positions_per_attributes: None,
skip_index_budget: false,
}
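With `thread_pool` no longer an `Option`, the fallback pools removed in the hunks above become unnecessary. A minimal caller sketch, assuming the new field layout:

let config = IndexerConfig::default();  // pool built via new_for_indexing()
let pool = &config.thread_pool;         // ThreadPoolNoAbort, nothing to unwrap
// under #[cfg(test)], default_thread_pool_and_threads() pins this pool to 1 thread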

View File

@@ -5,7 +5,7 @@ pub use self::concurrent_available_ids::ConcurrentAvailableIds;
pub use self::facet::bulk::FacetsUpdateBulk;
pub use self::facet::incremental::FacetsUpdateIncrementalInner;
pub use self::index_documents::*;
pub use self::indexer_config::IndexerConfig;
pub use self::indexer_config::{default_thread_pool_and_threads, IndexerConfig};
pub use self::new::ChannelCongestion;
pub use self::settings::{validate_embedding_settings, Setting, Settings};
pub use self::update_step::UpdateIndexingStep;

View File

@@ -8,7 +8,7 @@ use hashbrown::HashMap;
use serde_json::Value;
use super::super::cache::BalancedCaches;
use super::facet_document::extract_document_facets;
use super::facet_document::{extract_document_facets, extract_geo_document};
use super::FacetKind;
use crate::fields_ids_map::metadata::Metadata;
use crate::filterable_attributes_rules::match_faceted_field;
@@ -90,17 +90,12 @@ impl FacetedDocidsExtractor {
let mut cached_sorter = context.data.borrow_mut_or_yield();
let mut del_add_facet_value = DelAddFacetValue::new(&context.doc_alloc);
let docid = document_change.docid();
let res = match document_change {
DocumentChange::Deletion(inner) => extract_document_facets(
inner.current(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
is_geo_enabled,
&mut |fid, meta, depth, value| {
// Using a macro avoids borrowing the parameters as mutable in both closures at
// the same time by postponing their creation
macro_rules! facet_fn {
(del) => {
|fid: FieldId, meta: Metadata, depth: perm_json_p::Depth, value: &Value| {
Self::facet_fn_with_options(
&context.doc_alloc,
cached_sorter.deref_mut(),
@@ -114,91 +109,10 @@ impl FacetedDocidsExtractor {
depth,
value,
)
},
),
DocumentChange::Update(inner) => {
let has_changed = inner.has_changed_for_fields(
&mut |field_name| {
match_faceted_field(
field_name,
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
)
},
rtxn,
index,
context.db_fields_ids_map,
)?;
let has_changed_for_geo_fields =
inner.has_changed_for_geo_fields(rtxn, index, context.db_fields_ids_map)?;
if !has_changed && !has_changed_for_geo_fields {
return Ok(());
}
extract_document_facets(
inner.current(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
is_geo_enabled,
&mut |fid, meta, depth, value| {
Self::facet_fn_with_options(
&context.doc_alloc,
cached_sorter.deref_mut(),
BalancedCaches::insert_del_u32,
&mut del_add_facet_value,
DelAddFacetValue::insert_del,
docid,
fid,
meta,
filterable_attributes,
depth,
value,
)
},
)?;
extract_document_facets(
inner.merged(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
is_geo_enabled,
&mut |fid, meta, depth, value| {
Self::facet_fn_with_options(
&context.doc_alloc,
cached_sorter.deref_mut(),
BalancedCaches::insert_add_u32,
&mut del_add_facet_value,
DelAddFacetValue::insert_add,
docid,
fid,
meta,
filterable_attributes,
depth,
value,
)
},
)
}
DocumentChange::Insertion(inner) => extract_document_facets(
inner.inserted(),
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
is_geo_enabled,
&mut |fid, meta, depth, value| {
};
(add) => {
|fid: FieldId, meta: Metadata, depth: perm_json_p::Depth, value: &Value| {
Self::facet_fn_with_options(
&context.doc_alloc,
cached_sorter.deref_mut(),
@@ -212,12 +126,116 @@ impl FacetedDocidsExtractor {
depth,
value,
)
},
),
}
};
}
match document_change {
DocumentChange::Deletion(inner) => {
let mut del = facet_fn!(del);
extract_document_facets(
inner.current(rtxn, index, context.db_fields_ids_map)?,
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
&mut del,
)?;
if is_geo_enabled {
extract_geo_document(
inner.current(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
&mut del,
)?;
}
}
DocumentChange::Update(inner) => {
let has_changed_for_facets = inner.has_changed_for_fields(
&mut |field_name| {
match_faceted_field(
field_name,
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
)
},
rtxn,
index,
context.db_fields_ids_map,
)?;
// 1. Maybe update doc
if has_changed_for_facets {
extract_document_facets(
inner.current(rtxn, index, context.db_fields_ids_map)?,
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
&mut facet_fn!(del),
)?;
extract_document_facets(
inner.merged(rtxn, index, context.db_fields_ids_map)?,
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
&mut facet_fn!(add),
)?;
}
// 2. Maybe update geo
if is_geo_enabled
&& inner.has_changed_for_geo_fields(rtxn, index, context.db_fields_ids_map)?
{
extract_geo_document(
inner.current(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
&mut facet_fn!(del),
)?;
extract_geo_document(
inner.merged(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
&mut facet_fn!(add),
)?;
}
}
DocumentChange::Insertion(inner) => {
let mut add = facet_fn!(add);
extract_document_facets(
inner.inserted(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
&mut add,
)?;
if is_geo_enabled {
extract_geo_document(
inner.inserted(),
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
&mut add,
)?;
}
}
};
del_add_facet_value.send_data(docid, sender, &context.doc_alloc).unwrap();
res
Ok(())
}
#[allow(clippy::too_many_arguments)]
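The `facet_fn!` macro above sidesteps a borrow-checker limit: two closures that both capture `cached_sorter` and `del_add_facet_value` mutably cannot coexist, but a macro expands to a fresh closure at each use site. A self-contained sketch of the trick:

let mut log = Vec::new();
macro_rules! record {
    ($tag:expr) => {
        |value: u32| log.push(format!("{}: {}", $tag, value))
    };
}
{
    let mut del = record!("del");
    del(1);
} // `del` dropped here: the &mut borrow on `log` ends
{
    let mut add = record!("add");
    add(2);
}
assert_eq!(log, ["del: 1", "add: 2"]);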

View File

@@ -15,13 +15,11 @@ use crate::{
#[allow(clippy::too_many_arguments)]
pub fn extract_document_facets<'doc>(
document: impl Document<'doc>,
external_document_id: &str,
field_id_map: &mut GlobalFieldsIdsMap,
filterable_attributes: &[FilterableAttributesRule],
sortable_fields: &HashSet<String>,
asc_desc_fields: &HashSet<String>,
distinct_field: &Option<String>,
is_geo_enabled: bool,
facet_fn: &mut impl FnMut(FieldId, Metadata, perm_json_p::Depth, &Value) -> Result<()>,
) -> Result<()> {
// return the match result for the given field name.
@@ -101,17 +99,24 @@ pub fn extract_document_facets<'doc>(
}
}
if is_geo_enabled {
if let Some(geo_value) = document.geo_field()? {
if let Some([lat, lng]) = extract_geo_coordinates(external_document_id, geo_value)? {
let ((lat_fid, lat_meta), (lng_fid, lng_meta)) = field_id_map
.id_with_metadata_or_insert("_geo.lat")
.zip(field_id_map.id_with_metadata_or_insert("_geo.lng"))
.ok_or(UserError::AttributeLimitReached)?;
Ok(())
}
facet_fn(lat_fid, lat_meta, perm_json_p::Depth::OnBaseKey, &lat.into())?;
facet_fn(lng_fid, lng_meta, perm_json_p::Depth::OnBaseKey, &lng.into())?;
}
pub fn extract_geo_document<'doc>(
document: impl Document<'doc>,
external_document_id: &str,
field_id_map: &mut GlobalFieldsIdsMap,
facet_fn: &mut impl FnMut(FieldId, Metadata, perm_json_p::Depth, &Value) -> Result<()>,
) -> Result<()> {
if let Some(geo_value) = document.geo_field()? {
if let Some([lat, lng]) = extract_geo_coordinates(external_document_id, geo_value)? {
let ((lat_fid, lat_meta), (lng_fid, lng_meta)) = field_id_map
.id_with_metadata_or_insert("_geo.lat")
.zip(field_id_map.id_with_metadata_or_insert("_geo.lng"))
.ok_or(UserError::AttributeLimitReached)?;
facet_fn(lat_fid, lat_meta, perm_json_p::Depth::OnBaseKey, &lat.into())?;
facet_fn(lng_fid, lng_meta, perm_json_p::Depth::OnBaseKey, &lng.into())?;
}
}
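A caller-side sketch of the new split, using the names from the hunks above: facet extraction no longer receives geo parameters, and the geo pass runs only when enabled:

extract_document_facets(
    document, fields_ids_map, filterable_attributes,
    sortable_fields, asc_desc_fields, distinct_field, &mut facet_fn,
)?;
if is_geo_enabled {
    // writes only the _geo.lat / _geo.lng field ids
    extract_geo_document(document, external_document_id, fields_ids_map, &mut facet_fn)?;
}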

View File

@@ -336,7 +336,7 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
self.primary_key = Setting::Set(primary_key);
}
pub fn set_autorize_typos(&mut self, val: bool) {
pub fn set_authorize_typos(&mut self, val: bool) {
self.authorize_typos = Setting::Set(val);
}

View File

@@ -792,7 +792,7 @@ fn test_disable_typo() {
index
.update_settings_using_wtxn(&mut txn, |settings| {
settings.set_autorize_typos(false);
settings.set_authorize_typos(false);
})
.unwrap();