Compare commits

...

123 Commits

Author SHA1 Message Date
8f75c021c6 Remove the step to update the GitHub rule set 2025-06-09 11:48:17 +02:00
1d02efeab9 Merge pull request #5615 from martin-g/faster-tasks-mod-it-tests
tests: Faster tasks::mod IT tests
2025-06-04 12:38:39 +00:00
53fc98d3b0 Merge pull request #5632 from martin-g/db-change-label
ci: Use `GITHUB_TOKEN` secret for the `db change check` workflow
2025-06-04 12:23:01 +00:00
61b0f50d4d Trigger build
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-04 13:37:42 +03:00
c9efdf8c88 Render details.dumpUid as [dump_uid] in Value's Display
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-04 13:00:47 +03:00
72736c0ea9 Merge pull request #5627 from meilisearch/skip_remote_test
ignore flaky test
2025-06-04 08:28:24 +00:00
49317bbee4 Merge pull request #5625 from martin-g/faster-search-hybrid-it-tests
tests: Faster search::hybrid IT tests
2025-06-03 13:54:38 +00:00
af54c8381e Use ${{ github.repository }} instead of hardcoding the repo/owner
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:46:16 +03:00
693fcd5752 Try with GITHUB_TOKEN
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:40:40 +03:00
733175359a Update the new test case to use the new signature of index_with_documents_user_provided()
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:29:45 +03:00
7c6162f0bf Fix clippy error
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:26:21 +03:00
d6ae39bf0f tests: Faster search::hybrid IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 15:26:21 +03:00
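Most of the test commits in this list apply the same recipe, so here is a minimal sketch of the pattern, assuming the helpers visible in the diffs further down (Server::new_shared(), server.unique_index(), and the .succeeded()/.failed() assertions on waited tasks); the test name is illustrative, not from the changeset:

#[actix_rt::test]
async fn example_shared_server_test() {
    // One server process is shared by the whole test binary; only the index
    // name is unique per test, so tests can run concurrently without clashes.
    let server = Server::new_shared();
    let index = server.unique_index();

    let (task, code) = index.add_documents(json!([{ "id": 1, "content": "foobar" }]), None).await;
    assert_eq!(code, 202);
    // Assert the terminal state instead of merely waiting for the task.
    index.wait_task(task.uid()).await.succeeded();
}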
e416bbc1de Merge pull request #5623 from martin-g/faster-search-geo-it-tests
tests: Faster search::geo IT tests
2025-06-03 12:25:48 +00:00
2cfd363dc6 Merge pull request #5619 from martin-g/faster-documents-delete_documents-it-tests
tests: Faster documents::delete_documents IT tests
2025-06-03 12:06:07 +00:00
70aa78a2c2 Remove unused import
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 14:04:15 +03:00
96c81762ed Apply suggestions from code review
Do not use redactions for the snapshot assertions

Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-06-03 14:00:38 +03:00
0b1f634afa Remove useless code
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 13:52:55 +03:00
d3d5015854 Use the cancelled task uid
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 13:50:04 +03:00
f95f29c492 Use unique server+index for list_tasks_type_filtered() test case
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-06-03 13:45:46 +03:00
a50b69b868 Use unique server+index for list_tasks_status_filtered() test case
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-06-03 13:45:17 +03:00
3668f5f021 Use unique server+index for list_tasks() test case
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-06-03 13:44:38 +03:00
54fdf379bb Use shared_does_not_exists_index() index for delete_one_document_unexisting_index() test case
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 13:41:13 +03:00
41b1cd5a73 Extract a GEO_DOCUMENTS static variable and a shared index with these docs
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 13:08:12 +03:00
5c14a25d5a Merge pull request #5624 from martin-g/faster-documents-get_documents-it-tests
tests: Faster documents::get_documents IT tests
2025-06-03 09:37:07 +00:00
fda2843135 Merge pull request #5621 from martin-g/faster-similar-errors-it-tests
tests: Faster similar::errors IT tests
2025-06-03 09:27:27 +00:00
9347330f3a Merge pull request #5620 from martin-g/faster-search-distinct-it-tests
tests: Faster search::distinct IT tests
2025-06-03 09:24:39 +00:00
56c9190dab Merge pull request #5618 from martin-g/faster-vector-binary_quantized-it-tests
tests: Faster vector::binary_quantized IT tests
2025-06-03 09:20:08 +00:00
6b986dceaf Merge pull request #5607 from martin-g/faster-settings-get_settings-it-tests
tests: Faster settings::get_settings IT tests
2025-06-03 08:53:17 +00:00
ea6bb4df1d Merge pull request #5614 from meilisearch/fix-hybrid-distinct
Fix distinct for hybrid search
2025-06-03 07:20:55 +00:00
a3d2f64725 tests: Faster search::distinct IT tests
Use shared server + unique indices

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-03 08:23:26 +03:00
d5526cffff Merge pull request #5527 from nnethercott/all-cpus-in-import-dump
Use all CPUs during an import dump
2025-06-02 15:24:59 +00:00
5cb75d1f2a ignore flaky test 2025-06-02 17:06:53 +02:00
921e3c4ffe tests: Faster documents::get_documents IT tests
Use shared server + unique index

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 15:36:08 +03:00
52591761af tests: Faster search::geo IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 15:32:32 +03:00
f80182f0a9 tests: Faster similar::errors IT tests
Use shared server + unique indices

Related to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 15:20:17 +03:00
3b30b6a57a tests: Faster documents::delete_documents IT tests
Use shared server + unique indices
Assert .succeeded()/.failed() for the waited tasks

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 15:04:48 +03:00
5efc78db55 tests: Faster vector::binary_quantized IT tests
Use shared server + unique indices where possible

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 14:47:18 +03:00
cffbe3fcb6 Trigger build
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 14:17:19 +03:00
8d8fcb9846 Revert to unique server + named index for some tests
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 11:44:21 +03:00
20049669c9 Merge pull request #5600 from martin-g/faster-search-facet_search-it-tests
tests: Faster search::facet_search IT tests
2025-06-02 08:39:30 +00:00
db28d13cb1 Remove useless assertion.
.succeeded() does the same

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 10:59:46 +03:00
5a7cfc57fd tests: Faster tasks::mod IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 10:56:43 +03:00
790621dc29 Remove useless assert
Co-authored-by: Many the fish <many@meilisearch.com>
2025-06-02 10:55:28 +03:00
1d577ae98b Merge pull request #5610 from martin-g/faster-settings-tokenizer_customization-it-tests
tests: Faster settings::tokenizer_customization IT tests
2025-06-02 07:09:41 +00:00
88e9a55d44 Merge pull request #5609 from martin-g/faster-settings-proximity_settings-it-tests
tests: Faster settings::proximity_settings IT tests
2025-06-02 07:09:06 +00:00
dbe551cf99 Merge pull request #5606 from martin-g/faster-settings-distinct-it-tests
tests: Faster settings::distinct IT tests
2025-06-02 07:07:23 +00:00
a299fbd33b Merge pull request #5605 from martin-g/faster-search-restricted_searchable-it-tests
tests: Faster search::restricted_searchable IT tests
2025-06-02 07:06:50 +00:00
193119acb9 Merge pull request #5604 from martin-g/search-pagination-it-tests
tests: search::pagination IT tests
2025-06-02 07:05:52 +00:00
4c71118699 Merge pull request #5602 from martin-g/faster-search-matching_strategy-it-tests
tests: Faster search::matching_strategy IT tests
2025-06-02 07:04:43 +00:00
5fe2943d3c Merge pull request #5601 from martin-g/faster-search-locales-it-tests
tests: Faster search::locales IT tests
2025-06-02 07:02:28 +00:00
86ff502327 Merge pull request #5599 from martin-g/faster-index-search-errors-tests
tests: Faster search::errors IT tests
2025-06-02 06:54:32 +00:00
6b1a345dce tests: Faster settings::tokenizer_customization IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 08:23:09 +03:00
b54ece690b tests: Faster settings::proximity_settings IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-06-02 08:20:05 +03:00
3ea167bade tests: Faster settings::get_settings IT tests
Use shared server + unique indices

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-30 16:33:27 +03:00
1158d6689f tests: Faster settings::distinct IT tests
Use shared server + unique indices

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-30 15:41:31 +03:00
d9b0463a0b tests: Faster search::restricted_searchable IT tests
Use shared server + unique indices

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-30 15:37:27 +03:00
ae9899f179 tests: search::pagination IT tests
Minor cleanup.

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-30 15:26:55 +03:00
308fd7128e Fix clippy errors
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 11:36:56 +03:00
27e7c00622 Add dynamic redactions for taskUid and enqueuedAt properties
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 11:33:10 +03:00
58207da934 Trigger build
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 10:56:33 +03:00
fb8b832192 Trigger build
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 10:54:31 +03:00
17207b5405 tests: Faster search::matching_strategy IT tests
Use shared server + unique indices for all tests

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 09:09:02 +03:00
bd95503eba tests: Faster search::locales IT tests
Use a shared server + unique indices where possible

Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 09:03:23 +03:00
8b8b0d802c tests: Faster search::facet_search IT tests
Use shared server + unique indices where possible.
Assert .succeeded() for the waited tasks.
Drop usage of dbg!() in the assertions. It caused noise in the logs

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 08:53:10 +03:00
d329e86250 tests: Use shared server + unique indices where possible
Related-to: https://github.com/meilisearch/meilisearch/issues/4840

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-29 08:42:10 +03:00
d416b3b390 Merge pull request #5592 from nnethercott/extract-geo-facets-seperately
Decouple geo facet extraction from rest of document
2025-05-28 16:22:10 +00:00
54f5e74744 Support distinct in hybrid search 2025-05-28 17:58:58 +02:00
fd4b192a39 Add distinct_fid function and expose distinct_single_docid 2025-05-28 17:58:58 +02:00
3c13feebf7 Test that distinct is applied for hybrid search 2025-05-28 17:58:58 +02:00
1811168b96 remove duplicated check on geo field changes 2025-05-28 15:45:13 +02:00
b06cc1e0a2 Update crates/milli/src/update/new/extract/faceted/extract_facets.rs
Co-authored-by: Many the fish <many@meilisearch.com>
2025-05-28 15:38:23 +02:00
44f812c36d Update crates/milli/src/update/new/extract/faceted/extract_facets.rs
Co-authored-by: Many the fish <many@meilisearch.com>
2025-05-28 15:38:12 +02:00
c8e77b5f25 Merge pull request #5574 from martin-g/faster-add_documents-it-tests
perf: Faster integration tests for add_documents.rs
2025-05-28 13:13:38 +00:00
283f516e15 Merge pull request #5579 from martin-g/faster-index-update_index-it-tests
perf: Faster index::update_index IT tests
2025-05-28 13:11:56 +00:00
b4ca0a8c98 Update the tests related to updating indices
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:02:41 +03:00
b658e38acd Fix formatting
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:02:41 +03:00
f87e46cc16 Ignore the result from #wait_task()
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:02:41 +03:00
65354b414a Update crates/meilisearch/tests/index/update_index.rs
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-05-28 15:02:40 +03:00
025df397c0 Update crates/meilisearch/tests/index/update_index.rs
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-05-28 15:02:40 +03:00
f77abc9dc8 Update crates/meilisearch/tests/index/update_index.rs
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-05-28 15:02:40 +03:00
7e9909ee45 perf: Faster index::update_index IT tests
Use a shared server where possible.
Assert succeeded/failed task waits.

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:02:40 +03:00
43ec97fe45 format the code
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 15:01:04 +03:00
02929e241b Update the status code
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:36:13 +03:00
c13efde042 uuid is a production dependency of meili-snap
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:35:50 +03:00
36f0a1492c Apply suggestions from code review
Co-authored-by: Tamo <irevoire@protonmail.ch>
2025-05-28 14:22:04 +03:00
ce65ad213b Add dynamic redactions for uid, batchUid and taskUid
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:22:04 +03:00
3e0de6cb83 Wait for the batched tasks by their real uid.
Some of them succeed, others fail.

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:22:04 +03:00
f3d691667d Use a Regex in insta dynamic redaction to replace Uuids with [uuid]
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:22:01 +03:00
ce9c930d10 Fix clippy and fmt
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:21:25 +03:00
fc88b003b4 Use shared server and unique indices for add_documents IT tests
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:20:07 +03:00
cf5d26124a Call .succeeded() or .failed() on the waited task
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:18:34 +03:00
38b1c57fa8 Faster IT tests for add_documents.rs
Use shared server where possible

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-28 14:18:33 +03:00
25c525b057 Merge pull request #5589 from mcmah309/typo_fix
Typo fix
2025-05-28 11:02:22 +00:00
83cd28b60b Merge pull request #5584 from martin-g/faster-index-search-mod-tests
tests: Faster index::search::mod IT tests
2025-05-28 08:40:37 +00:00
48cad4132a Fix clippy - ignore code variable
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-27 16:44:57 +03:00
4897ad99d0 Wait for the add_documents task
Format the code

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-27 14:26:29 +03:00
46ff78b4ec Update the regex to replace all occurrences of uuids in the redaction
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-27 11:47:02 +03:00
9ad43b6841 rename has_changed to has_changed_for_facets 2025-05-26 18:37:20 +02:00
c9ec502ed9 refactor for readability 2025-05-26 18:32:59 +02:00
18aed75d3b fix logic 2025-05-26 18:20:55 +02:00
6738a4f6ee feat: update the insta snapshots 2025-05-26 16:36:36 +02:00
d2948adea3 Migrate more tests to assert with "[uuid]" instead of real Uuid
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-26 14:31:58 +03:00
f54b57e5be Use a Regex in insta dynamic redaction to replace Uuids with [uuid]
(cherry picked from commit f8b8c6ab71a28052cf9b271ca8aa5d4175f9e8f9)
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-26 14:03:48 +03:00
95821d0bde refactor: update macro 2025-05-26 10:07:13 +02:00
f690fa0686 feat: add macro_rules to factorize 2025-05-26 09:46:14 +02:00
24e94b28c1 feat: uncouple geo extraction from full doc 2025-05-26 09:22:20 +02:00
34d58f35c8 Print [uuid] instead of the Uuid index name for MeilisearchHttpError::Milli errors
This way the tests' assertions/snapshots for unique indices would be stable

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-25 15:48:55 +03:00
1d5265caf4 Fix typo in method name 2025-05-22 14:25:04 +00:00
8c8d98eeaa Use shared server and unique indices for all tests where possible
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-21 10:48:20 +03:00
57eecd6197 Remove an empty line
Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-20 14:37:45 +03:00
2fe5c78cb6 tests: Faster index::search::mod IT tests
* Use shared index where possible.
* Call .succeeded/.failed when waiting for a task.
* Use newer format_args syntax
* Do not use fully qualified name for meili_snap:: functions. The
  functions are already imported in scope

Signed-off-by: Martin Tzvetanov Grigorov <mgrigorov@apache.org>
2025-05-20 14:26:26 +03:00
79db2e67fb refactor: prefer helper over explicit pool construction
Co-authored-by: Many the fish <many@meilisearch.com>
2025-05-15 11:24:34 +02:00
865f24cfef refactor: helper methods for pool and max threads 2025-05-14 23:45:24 +02:00
806e983aa5 fix: lazy computation in thread default
Co-authored-by: Martin Grigorov <martin-g@users.noreply.github.com>
2025-05-13 14:14:48 +02:00
e96c1d4b0f style: change fmt from empty str to "unlimited" 2025-05-13 12:16:34 +02:00
15cdc6924b refactor: remove runtime cfg!(test) check
It won't work in integration tests, and consequently all threads would be
used. To remedy this we make `max_threads=Some(1)` explicit in the
IndexerConfig::default
2025-05-13 09:18:19 +02:00
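A minimal sketch of the problem this commit message describes, using a hypothetical function (not from the diff): cfg!(test) compiles to true only for the crate's own unit-test build, never for a separate integration-test binary, so a runtime check like the one below silently falls through to the all-CPUs path there:

fn default_max_threads() -> Option<usize> {
    if cfg!(test) {
        // Unit tests: cap indexing to one thread.
        Some(1)
    } else {
        // Integration tests also land here, which is why the cap is now set
        // explicitly in IndexerConfig::default instead.
        None
    }
}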
75a7e40a27 Merge branch 'main' into all-cpus-in-import-dump 2025-05-12 21:48:12 +02:00
53f32a7dd7 refactor: change thread_pool from Option<ThreadPoolNoAbort> to
ThreadPoolNoAbort
2025-05-07 17:00:08 +02:00
47a7ed93d3 feat: Make MaxThreads None by default 2025-05-06 09:11:55 +02:00
2ac826edca Apply suggested changes
Co-authored-by: Clément Renault <renault.cle@gmail.com>

Update crates/meilisearch/src/lib.rs

Co-authored-by: Clément Renault <renault.cle@gmail.com>
2025-05-01 16:12:06 +02:00
89aff2081c Fix clippy warnings 2025-04-30 14:17:32 +02:00
3b773b3416 Revert thread_pool type back to Option in config 2025-04-28 11:56:37 +02:00
648b2876f6 Create temp threadpool with all CPUs in dump 2025-04-27 00:52:10 +02:00
52 changed files with 3213 additions and 2974 deletions

View File

@@ -4,22 +4,22 @@ on:
pull_request:
types: [opened, synchronize, reopened, labeled, unlabeled]
env:
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
jobs:
check-labels:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Check db change labels
id: check_labels
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
URL=/repos/meilisearch/meilisearch/pulls/${{ github.event.pull_request.number }}/labels
echo ${{ github.event.pull_request.number }}
echo $URL
LABELS=$(gh api -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" /repos/meilisearch/meilisearch/issues/${{ github.event.pull_request.number }}/labels -q .[].name)
LABELS=$(gh api -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" /repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/labels -q .[].name)
echo "Labels: $LABELS"
if [[ ! "$LABELS" =~ "db change" && ! "$LABELS" =~ "no db change" ]]; then
echo "::error::Pull request must contain either the 'db change' or 'no db change' label."
exit 1

View File

@@ -5,7 +5,6 @@ name: Milestone's workflow
# For each Milestone created (not opened!), and if the release is NOT a patch release (only the patch changed)
# - the roadmap issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/roadmap-issue.md
# - the changelog issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/changelog-issue.md
# - update the ruleset to add the current release version to the list of allowed versions and be able to use the merge queue.
# For each Milestone closed
# - the `release_version` label is created
@@ -148,38 +147,6 @@ jobs:
--body-file $ISSUE_TEMPLATE \
--milestone $MILESTONE_VERSION
update-ruleset:
runs-on: ubuntu-latest
if: github.event.action == 'created'
steps:
- uses: actions/checkout@v3
- name: Install jq
run: |
sudo apt-get update
sudo apt-get install -y jq
- name: Update ruleset
env:
# gh api repos/meilisearch/meilisearch/rulesets --jq '.[] | {name: .name, id: .id}'
RULESET_ID: 4253297
BRANCH_NAME: ${{ github.event.inputs.branch_name }}
run: |
echo "RULESET_ID: ${{ env.RULESET_ID }}"
echo "BRANCH_NAME: ${{ env.BRANCH_NAME }}"
# Get current ruleset conditions
CONDITIONS=$(gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} --jq '{ conditions: .conditions }')
# Update the conditions by appending the milestone version
UPDATED_CONDITIONS=$(echo $CONDITIONS | jq '.conditions.ref_name.include += ["refs/heads/release-'${{ env.MILESTONE_VERSION }}'"]')
# Update the ruleset from stdin (-)
echo $UPDATED_CONDITIONS |
gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} \
--method PUT \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
--input -
# ----------------
# MILESTONE CLOSED
# ----------------

Cargo.lock (generated): 2689 changed lines

File diff suppressed because it is too large

View File

@@ -5,7 +5,7 @@ use meilisearch_types::milli::documents::PrimaryKey;
use meilisearch_types::milli::progress::Progress;
use meilisearch_types::milli::update::new::indexer::{self, UpdateByFunction};
use meilisearch_types::milli::update::DocumentAdditionResult;
use meilisearch_types::milli::{self, ChannelCongestion, Filter, ThreadPoolNoAbortBuilder};
use meilisearch_types::milli::{self, ChannelCongestion, Filter};
use meilisearch_types::settings::apply_settings_to_builder;
use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
use meilisearch_types::Index;
@@ -113,18 +113,8 @@ impl IndexScheduler {
}
}
let local_pool;
let indexer_config = self.index_mapper.indexer_config();
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new()
.thread_name(|i| format!("indexing-thread-{i}"))
.build()
.unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
progress.update_progress(DocumentOperationProgress::ComputingDocumentChanges);
let (document_changes, operation_stats, primary_key) = indexer
@@ -266,18 +256,8 @@ impl IndexScheduler {
let mut congestion = None;
if task.error.is_none() {
let local_pool;
let indexer_config = self.index_mapper.indexer_config();
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new()
.thread_name(|i| format!("indexing-thread-{i}"))
.build()
.unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
let candidates_count = candidates.len();
progress.update_progress(DocumentEditionProgress::ComputingDocumentChanges);
@@ -429,18 +409,8 @@ impl IndexScheduler {
let mut congestion = None;
if !tasks.iter().all(|res| res.error.is_some()) {
let local_pool;
let indexer_config = self.index_mapper.indexer_config();
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new()
.thread_name(|i| format!("indexing-thread-{i}"))
.build()
.unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
progress.update_progress(DocumentDeletionProgress::DeleteDocuments);
let mut indexer = indexer::DocumentDeletion::new();

View File

@@ -15,3 +15,5 @@ license.workspace = true
insta = { version = "=1.39.0", features = ["json", "redactions"] }
md5 = "0.7.0"
once_cell = "1.20"
regex-lite = "0.1.6"
uuid = { version = "1.17.0", features = ["v4"] }

View File

@@ -4,9 +4,16 @@ use std::path::{Path, PathBuf};
use std::sync::Mutex;
pub use insta;
use insta::internals::{Content, ContentPath};
use once_cell::sync::Lazy;
use regex_lite::Regex;
static SNAPSHOT_NAMES: Lazy<Mutex<HashMap<PathBuf, usize>>> = Lazy::new(Mutex::default);
/// A regex to match UUIDs in messages, specifically looking for the UUID v4 format
static UUID_IN_MESSAGE_RE: Lazy<Regex> = Lazy::new(|| {
Regex::new(r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}")
.unwrap()
});
/// Return the md5 hash of the given string
pub fn hash_snapshot(snap: &str) -> String {
@@ -26,6 +33,34 @@ pub fn default_snapshot_settings_for_test<'a>(
let filename = path.file_name().unwrap().to_str().unwrap();
settings.set_omit_expression(true);
fn uuid_in_message_redaction(content: Content, _content_path: ContentPath) -> Content {
match &content {
Content::String(s) => {
let uuid_replaced = UUID_IN_MESSAGE_RE.replace_all(s, "[uuid]");
Content::String(uuid_replaced.to_string())
}
_ => content,
}
}
settings.add_dynamic_redaction(".message", uuid_in_message_redaction);
settings.add_dynamic_redaction(".error.message", uuid_in_message_redaction);
settings.add_dynamic_redaction(".indexUid", |content, _content_path| match &content {
Content::String(s) => match uuid::Uuid::parse_str(s) {
Ok(_) => Content::String("[uuid]".to_owned()),
Err(_) => content,
},
_ => content,
});
settings.add_dynamic_redaction(".error.message", |content, _content_path| match &content {
Content::String(s) => {
let uuid_replaced = UUID_IN_MESSAGE_RE.replace_all(s, "$before[uuid]$after");
Content::String(uuid_replaced.to_string())
}
_ => content,
});
let test_name = test_name.strip_suffix("::{{closure}}").unwrap_or(test_name);
let test_name = test_name.rsplit("::").next().unwrap().to_owned();
@@ -232,6 +267,9 @@ macro_rules! json_string {
#[cfg(test)]
mod tests {
use crate as meili_snap;
use crate::UUID_IN_MESSAGE_RE;
use uuid::Uuid;
#[test]
fn snap() {
snapshot_hash!(10, @"d3d9446802a44259755d38e6d163e820");
@@ -279,4 +317,14 @@ mod tests {
// snapshot_hash!("", name: "", @"d41d8cd98f00b204e9800998ecf8427e");
}
}
#[test]
fn uuid_in_message_regex() {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
let uuid3 = Uuid::new_v4();
let to_replace = format!("1 {uuid1} 2 {uuid2} 3 {uuid3} 4");
let replaced = UUID_IN_MESSAGE_RE.replace_all(to_replace.as_str(), "[uuid]");
assert_eq!(replaced, "1 [uuid] 2 [uuid] 3 [uuid] 4");
}
}

View File

@@ -666,7 +666,7 @@ pub fn apply_settings_to_builder(
match typo_tolerance {
Setting::Set(ref value) => {
match value.enabled {
Setting::Set(val) => builder.set_autorize_typos(val),
Setting::Set(val) => builder.set_authorize_typos(val),
Setting::Reset => builder.reset_authorize_typos(),
Setting::NotSet => (),
}

View File

@@ -116,7 +116,7 @@ utoipa-scalar = { version = "0.3.0", optional = true, features = ["actix-web"] }
actix-rt = "2.10.0"
brotli = "6.0.0"
# fixed version due to format breakages in v1.40
insta = "=1.39.0"
insta = { version = "=1.39.0", features = ["redactions"] }
manifest-dir-macros = "0.1.18"
maplit = "1.0.2"
meili-snap = { path = "../meili-snap" }

View File

@@ -37,7 +37,9 @@ use index_scheduler::{IndexScheduler, IndexSchedulerOptions};
use meilisearch_auth::{open_auth_store_env, AuthController};
use meilisearch_types::milli::constants::VERSION_MAJOR;
use meilisearch_types::milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
use meilisearch_types::milli::update::{IndexDocumentsConfig, IndexDocumentsMethod};
use meilisearch_types::milli::update::{
default_thread_pool_and_threads, IndexDocumentsConfig, IndexDocumentsMethod, IndexerConfig,
};
use meilisearch_types::settings::apply_settings_to_builder;
use meilisearch_types::tasks::KindWithContent;
use meilisearch_types::versioning::{
@@ -500,7 +502,19 @@ fn import_dump(
let network = dump_reader.network()?.cloned().unwrap_or_default();
index_scheduler.put_network(network)?;
let indexer_config = index_scheduler.indexer_config();
// 3.1 Use all cpus to process dump if `max_indexing_threads` not configured
let backup_config;
let base_config = index_scheduler.indexer_config();
let indexer_config = if base_config.max_threads.is_none() {
let (thread_pool, _) = default_thread_pool_and_threads();
let _config = IndexerConfig { thread_pool, ..*base_config };
backup_config = _config;
&backup_config
} else {
base_config
};
// /!\ The tasks must be imported AFTER importing the indexes or else the scheduler might
// try to process tasks while we're trying to import the indexes.
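The body of default_thread_pool_and_threads() is not part of this diff; a plausible sketch of it, under the assumption that it reuses the ThreadPoolNoAbortBuilder::new_for_indexing() constructor seen in a later file:

pub fn default_thread_pool_and_threads() -> (ThreadPoolNoAbort, usize) {
    // Dump imports are one-off work, so use every core the host offers.
    let threads = std::thread::available_parallelism().map_or(1, |n| n.get());
    let pool = ThreadPoolNoAbortBuilder::new_for_indexing()
        .num_threads(threads)
        .build()
        .expect("failed to build the dump-import thread pool");
    (pool, threads)
}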

View File

@@ -746,10 +746,12 @@ impl IndexerOpts {
max_indexing_memory.to_string(),
);
}
export_to_env_if_not_present(
MEILI_MAX_INDEXING_THREADS,
max_indexing_threads.0.to_string(),
);
if let Some(max_indexing_threads) = max_indexing_threads.0 {
export_to_env_if_not_present(
MEILI_MAX_INDEXING_THREADS,
max_indexing_threads.to_string(),
);
}
}
}
@@ -757,15 +759,15 @@ impl TryFrom<&IndexerOpts> for IndexerConfig {
type Error = anyhow::Error;
fn try_from(other: &IndexerOpts) -> Result<Self, Self::Error> {
let thread_pool = ThreadPoolNoAbortBuilder::new()
.thread_name(|index| format!("indexing-thread:{index}"))
.num_threads(*other.max_indexing_threads)
let thread_pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.num_threads(other.max_indexing_threads.unwrap_or_else(|| num_cpus::get() / 2))
.build()?;
Ok(Self {
thread_pool,
log_every_n: Some(DEFAULT_LOG_EVERY_N),
max_memory: other.max_indexing_memory.map(|b| b.as_u64() as usize),
thread_pool: Some(thread_pool),
max_threads: *other.max_indexing_threads,
max_positions_per_attributes: None,
skip_index_budget: other.skip_index_budget,
..Default::default()
@@ -828,31 +830,31 @@ fn total_memory_bytes() -> Option<u64> {
}
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize)]
pub struct MaxThreads(usize);
#[derive(Default, Debug, Clone, Copy, Deserialize, Serialize)]
pub struct MaxThreads(Option<usize>);
impl FromStr for MaxThreads {
type Err = ParseIntError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
usize::from_str(s).map(Self)
}
}
impl Default for MaxThreads {
fn default() -> Self {
MaxThreads(num_cpus::get() / 2)
fn from_str(s: &str) -> Result<MaxThreads, Self::Err> {
if s.is_empty() || s == "unlimited" {
return Ok(MaxThreads::default());
}
usize::from_str(s).map(Some).map(MaxThreads)
}
}
impl fmt::Display for MaxThreads {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
match self.0 {
Some(threads) => write!(f, "{}", threads),
None => write!(f, "unlimited"),
}
}
}
impl Deref for MaxThreads {
type Target = usize;
type Target = Option<usize>;
fn deref(&self) -> &Self::Target {
&self.0
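To make the new semantics concrete, a small assumed test (not part of the diff): an empty string or `unlimited` now parses to None, a number to an explicit cap, and Display round-trips both.

#[test]
fn max_threads_parsing() {
    use std::str::FromStr;
    assert_eq!(*MaxThreads::from_str("").unwrap(), None);          // default
    assert_eq!(*MaxThreads::from_str("unlimited").unwrap(), None); // no cap
    assert_eq!(*MaxThreads::from_str("8").unwrap(), Some(8));      // explicit cap
    assert_eq!(MaxThreads::from_str("8").unwrap().to_string(), "8");
    assert_eq!(MaxThreads::default().to_string(), "unlimited");
}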

View File

@@ -538,7 +538,7 @@ async fn error_add_api_key_parameters_uid_already_exist() {
let (response, code) = server.add_api_key(content).await;
meili_snap::snapshot!(meili_snap::json_string!(response, { ".createdAt" => "[ignored]", ".updatedAt" => "[ignored]" }), @r###"
{
"message": "`uid` field value `4bc0887a-0e41-4f3b-935d-0c451dcee9c8` is already an existing API key.",
"message": "`uid` field value `[uuid]` is already an existing API key.",
"code": "api_key_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#api_key_already_exists"

View File

@@ -29,6 +29,10 @@ impl<'a> Index<'a, Owned> {
}
}
pub fn with_encoder(&self, encoder: Encoder) -> Index<'a, Owned> {
Index { uid: self.uid.clone(), service: self.service, encoder, marker: PhantomData }
}
pub async fn load_test_set(&self) -> u64 {
let url = format!("/indexes/{}/documents", urlencode(self.uid.as_ref()));
let (response, code) = self
@@ -290,6 +294,20 @@ impl Index<'_, Shared> {
}
(task, code)
}
pub async fn update_index_fail(&self, primary_key: Option<&str>) -> (Value, StatusCode) {
let (mut task, code) = self._update(primary_key).await;
if code.is_success() {
task = self.wait_task(task.uid()).await;
if task.is_success() {
panic!(
"`update_index_fail` succeeded: {}",
serde_json::to_string_pretty(&task).unwrap()
);
}
}
(task, code)
}
}
#[allow(dead_code)]
@@ -333,6 +351,14 @@ impl<State> Index<'_, State> {
self.service.post_encoded("/indexes", body, self.encoder).await
}
pub(super) async fn _update(&self, primary_key: Option<&str>) -> (Value, StatusCode) {
let body = json!({
"primaryKey": primary_key,
});
let url = format!("/indexes/{}", urlencode(self.uid.as_ref()));
self.service.patch_encoded(url, body, self.encoder).await
}
pub(super) async fn _delete(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}", urlencode(self.uid.as_ref()));
self.service.delete(url).await
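A short usage sketch of the new update_index_fail helper, mirroring how the rewritten update_index tests further down call it (the primary key value is theirs):

let index = shared_does_not_exists_index().await;
let (task, code) = index.update_index_fail(Some("my-primary-key")).await;
assert_eq!(code, 202);
// The helper already panics if the update unexpectedly succeeds; the caller
// still asserts the terminal state explicitly.
index.wait_task(task.uid()).await.failed();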

View File

@@ -128,7 +128,8 @@ impl Display for Value {
".finishedAt" => "[date]",
".duration" => "[duration]",
".processingTimeMs" => "[duration]",
".details.embedders.*.url" => "[url]"
".details.embedders.*.url" => "[url]",
".details.dumpUid" => "[dump_uid]",
})
)
}
@@ -264,6 +265,24 @@ pub static SCORE_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
])
});
pub async fn shared_index_with_score_documents() -> &'static Index<'static, Shared> {
static INDEX: OnceCell<Index<'static, Shared>> = OnceCell::const_new();
INDEX.get_or_init(|| async {
let server = Server::new_shared();
let index = server._index("SCORE_DOCUMENTS").to_shared();
let documents = SCORE_DOCUMENTS.clone();
let (response, _code) = index._add_documents(documents, None).await;
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
._update_settings(
json!({"filterableAttributes": ["id", "title"], "sortableAttributes": ["id", "title"]}),
)
.await;
index.wait_task(response.uid()).await.succeeded();
index
}).await
}
pub static NESTED_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
@@ -435,3 +454,57 @@ pub async fn shared_index_with_test_set() -> &'static Index<'static, Shared> {
})
.await
}
pub static GEO_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
"id": 1,
"name": "Taco Truck",
"address": "444 Salsa Street, Burritoville",
"type": "Mexican",
"rating": 9,
"_geo": {
"lat": 34.0522,
"lng": -118.2437
}
},
{
"id": 2,
"name": "La Bella Italia",
"address": "456 Elm Street, Townsville",
"type": "Italian",
"rating": 9,
"_geo": {
"lat": "45.4777599",
"lng": "9.1967508"
}
},
{
"id": 3,
"name": "Crêpe Truck",
"address": "2 Billig Avenue, Rouenville",
"type": "French",
"rating": 10
}
])
});
pub async fn shared_index_with_geo_documents() -> &'static Index<'static, Shared> {
static INDEX: OnceCell<Index<'static, Shared>> = OnceCell::const_new();
INDEX
.get_or_init(|| async {
let server = Server::new_shared();
let index = server._index("SHARED_GEO_DOCUMENTS").to_shared();
let (response, _code) = index._add_documents(GEO_DOCUMENTS.clone(), None).await;
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index
._update_settings(
json!({"filterableAttributes": ["_geo"], "sortableAttributes": ["_geo"]}),
)
.await;
index.wait_task(response.uid()).await.succeeded();
index
})
.await
}
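A hedged usage sketch for the new shared geo index; the test name and query are illustrative, not taken from this changeset:

#[actix_rt::test]
async fn geo_sort_with_shared_index() {
    let index = shared_index_with_geo_documents().await;
    // "_geo" was made sortable when the shared index was initialized above.
    let (response, code) =
        index.search_post(json!({ "sort": ["_geoPoint(34.0522, -118.2437):asc"] })).await;
    assert_eq!(code, 200);
    // Two of the three shared documents carry a `_geo` field.
    assert!(!response["hits"].as_array().unwrap().is_empty());
}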

File diff suppressed because it is too large

View File

@@ -1,39 +1,35 @@
use meili_snap::{json_string, snapshot};
use crate::common::{GetAllDocumentsOptions, Server};
use crate::common::{shared_does_not_exists_index, GetAllDocumentsOptions, Server};
use crate::json;
#[actix_rt::test]
async fn delete_one_document_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let (task, code) = index.delete_document(0).await;
let index = shared_does_not_exists_index().await;
let (task, code) = index.delete_document_by_filter_fail(json!({"filter": "a = b"})).await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "failed");
index.wait_task(task.uid()).await.failed();
}
#[actix_rt::test]
async fn delete_one_unexisting_document() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index.create(None).await;
let (response, code) = index.delete_document(0).await;
assert_eq!(code, 202, "{}", response);
let update = index.wait_task(response.uid()).await;
assert_eq!(update["status"], "succeeded");
assert_eq!(code, 202, "{response}");
index.wait_task(response.uid()).await.succeeded();
}
#[actix_rt::test]
async fn delete_one_document() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) =
index.add_documents(json!([{ "id": 0, "content": "foobar" }]), None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, status_code) = server.index("test").delete_document(0).await;
let (task, status_code) = index.delete_document(0).await;
assert_eq!(status_code, 202);
index.wait_task(task.uid()).await.succeeded();
@@ -43,20 +39,18 @@ async fn delete_one_document() {
#[actix_rt::test]
async fn clear_all_documents_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.clear_all_documents().await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "failed");
index.wait_task(task.uid()).await.failed();
}
#[actix_rt::test]
async fn clear_all_documents() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index
.add_documents(
json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }]),
@@ -67,7 +61,7 @@ async fn clear_all_documents() {
let (task, code) = index.clear_all_documents().await;
assert_eq!(code, 202);
let _update = index.wait_task(task.uid()).await;
let _update = index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert!(response["results"].as_array().unwrap().is_empty());
@@ -75,14 +69,14 @@ async fn clear_all_documents() {
#[actix_rt::test]
async fn clear_all_documents_empty_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.clear_all_documents().await;
assert_eq!(code, 202);
let _update = index.wait_task(task.uid()).await;
let _update = index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert!(response["results"].as_array().unwrap().is_empty());
@@ -90,33 +84,31 @@ async fn clear_all_documents_empty_index() {
#[actix_rt::test]
async fn error_delete_batch_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.delete_batch(vec![]).await;
let expected_response = json!({
"message": "Index `test` not found.",
"message": format!("Index `{}` not found.", index.uid),
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "failed");
let response = index.wait_task(task.uid()).await.failed();
assert_eq!(response["error"], expected_response);
}
#[actix_rt::test]
async fn delete_batch() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task,_status_code) = index.add_documents(json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }, { "id": 3, "content": "foobar" }]), Some("id")).await;
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.delete_batch(vec![1, 0]).await;
assert_eq!(code, 202);
let _update = index.wait_task(task.uid()).await;
let _update = index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 1);
@@ -125,14 +117,14 @@ async fn delete_batch() {
#[actix_rt::test]
async fn delete_no_document_batch() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task,_status_code) = index.add_documents(json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }, { "id": 3, "content": "foobar" }]), Some("id")).await;
index.wait_task(task.uid()).await.succeeded();
let (_response, code) = index.delete_batch(vec![]).await;
assert_eq!(code, 202, "{}", _response);
let (response, code) = index.delete_batch(vec![]).await;
assert_eq!(code, 202, "{response}");
let _update = index.wait_task(_response.uid()).await;
let _update = index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 3);
@@ -140,8 +132,8 @@ async fn delete_no_document_batch() {
#[actix_rt::test]
async fn delete_document_by_filter() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
index.update_settings_filterable_attributes(json!(["color"])).await;
let (task, _status_code) = index
.add_documents(
@@ -178,22 +170,22 @@ async fn delete_document_by_filter() {
let (response, code) =
index.delete_document_by_filter(json!({ "filter": "color = blue"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]" }), @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 2,
"indexUid": "doggo",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "[date]"
}
"###);
let response = index.wait_task(response.uid()).await;
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
let response = index.wait_task(response.uid()).await.succeeded();
snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "doggo",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@@ -251,22 +243,22 @@ async fn delete_document_by_filter() {
let (response, code) =
index.delete_document_by_filter(json!({ "filter": "color NOT EXISTS"})).await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"taskUid": 3,
"indexUid": "doggo",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "[date]"
}
"###);
let response = index.wait_task(response.uid()).await;
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
let response = index.wait_task(response.uid()).await.succeeded();
snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 3,
"batchUid": 3,
"indexUid": "doggo",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@@ -321,8 +313,8 @@ async fn delete_document_by_filter() {
#[actix_rt::test]
async fn delete_document_by_complex_filter() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
index.update_settings_filterable_attributes(json!(["color"])).await;
let (task, _status_code) = index
.add_documents(
@@ -343,22 +335,22 @@ async fn delete_document_by_complex_filter() {
)
.await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]" }), @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 2,
"indexUid": "doggo",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "[date]"
}
"###);
let response = index.wait_task(response.uid()).await;
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
let response = index.wait_task(response.uid()).await.succeeded();
snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "doggo",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@@ -402,22 +394,22 @@ async fn delete_document_by_complex_filter() {
.delete_document_by_filter(json!({ "filter": [["color = green", "color NOT EXISTS"]] }))
.await;
snapshot!(code, @"202 Accepted");
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"taskUid": 3,
"indexUid": "doggo",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "documentDeletion",
"enqueuedAt": "[date]"
}
"###);
let response = index.wait_task(response.uid()).await;
snapshot!(json_string!(response, { ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
let response = index.wait_task(response.uid()).await.succeeded();
snapshot!(json_string!(response, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]", ".duration" => "[duration]" }), @r###"
{
"uid": 3,
"batchUid": 3,
"indexUid": "doggo",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,

View File

@@ -832,8 +832,8 @@ async fn get_document_by_ids_and_filter() {
#[actix_rt::test]
async fn get_document_with_vectors() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({

View File

@@ -2,28 +2,26 @@ use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::encoder::Encoder;
use crate::common::Server;
use crate::common::{shared_does_not_exists_index, shared_index_with_documents, Server};
use crate::json;
#[actix_rt::test]
async fn update_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.create(None).await;
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.create(None).await;
assert_eq!(code, 202);
index.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = index.update(Some("primary")).await;
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get().await;
assert_eq!(code, 200);
assert_eq!(response["uid"], "test");
assert_eq!(response["uid"], index.uid);
assert!(response.get("createdAt").is_some());
assert!(response.get("updatedAt").is_some());
@@ -39,24 +37,23 @@ async fn update_primary_key() {
#[actix_rt::test]
async fn create_and_update_with_different_encoding() {
let server = Server::new().await;
let index = server.index_with_encoder("test", Encoder::Gzip);
let (_, code) = index.create(None).await;
let server = Server::new_shared();
let index = server.unique_index_with_encoder(Encoder::Gzip);
let (create_task, code) = index.create(None).await;
assert_eq!(code, 202);
index.wait_task(create_task.uid()).await.succeeded();
let index = server.index_with_encoder("test", Encoder::Brotli);
let index = index.with_encoder(Encoder::Brotli);
let (task, _status_code) = index.update(Some("primary")).await;
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task.uid()).await.succeeded();
}
#[actix_rt::test]
async fn update_nothing() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task1, code) = index.create(None).await;
assert_eq!(code, 202);
@@ -67,35 +64,20 @@ async fn update_nothing() {
assert_eq!(code, 202);
let response = index.wait_task(task2.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task2.uid()).await.succeeded();
}
#[actix_rt::test]
async fn error_update_existing_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.create(Some("id")).await;
let index = shared_index_with_documents().await;
let (update_task, code) = index.update_index_fail(Some("primary")).await;
assert_eq!(code, 202);
let documents = json!([
{
"id": "11",
"content": "foobar"
}
]);
index.add_documents(documents, None).await;
let (task, code) = index.update(Some("primary")).await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
let response = index.wait_task(update_task.uid()).await.failed();
let expected_response = json!({
"message": "Index `test`: Index already has a primary key: `id`.",
"message": format!("Index `{}`: Index already has a primary key: `id`.", index.uid),
"code": "index_primary_key_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_primary_key_already_exists"
@@ -106,15 +88,15 @@ async fn error_update_existing_primary_key() {
#[actix_rt::test]
async fn error_update_unexisting_index() {
let server = Server::new().await;
let (task, code) = server.index("test").update(None).await;
let index = shared_does_not_exists_index().await;
let (task, code) = index.update_index_fail(Some("my-primary-key")).await;
assert_eq!(code, 202);
let response = server.index("test").wait_task(task.uid()).await;
let response = index.wait_task(task.uid()).await.failed();
let expected_response = json!({
"message": "Index `test` not found.",
"message": format!("Index `{}` not found.", index.uid),
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"

View File

@@ -146,8 +146,8 @@ static DOCUMENT_DISTINCT_KEY: &str = "product_id";
/// testing: https://github.com/meilisearch/meilisearch/issues/4078
#[actix_rt::test]
async fn distinct_search_with_offset_no_ranking() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.add_documents(documents, Some(DOCUMENT_PRIMARY_KEY)).await;
@@ -163,50 +163,50 @@ async fn distinct_search_with_offset_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["123456", "789012"]"#);
snapshot!(format!("{hits:?}"), @r#"["123456", "789012"]"#);
snapshot!(response["estimatedTotalHits"] , @"11");
let (response, code) = index.search_post(json!({"offset": 2, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["456789", "987654"]"#);
snapshot!(format!("{hits:?}"), @r#"["456789", "987654"]"#);
snapshot!(response["estimatedTotalHits"], @"10");
let (response, code) = index.search_post(json!({"offset": 4, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["234567", "345678"]"#);
snapshot!(format!("{hits:?}"), @r#"["234567", "345678"]"#);
snapshot!(response["estimatedTotalHits"], @"6");
let (response, code) = index.search_post(json!({"offset": 5, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"1");
snapshot!(format!("{:?}", hits), @r#"["345678"]"#);
snapshot!(format!("{hits:?}"), @r#"["345678"]"#);
snapshot!(response["estimatedTotalHits"], @"6");
let (response, code) = index.search_post(json!({"offset": 6, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"0");
snapshot!(format!("{:?}", hits), @r#"[]"#);
snapshot!(format!("{hits:?}"), @r#"[]"#);
snapshot!(response["estimatedTotalHits"], @"6");
let (response, code) = index.search_post(json!({"offset": 7, "limit": 2})).await;
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"0");
snapshot!(format!("{:?}", hits), @r#"[]"#);
snapshot!(format!("{hits:?}"), @r#"[]"#);
snapshot!(response["estimatedTotalHits"], @"6");
}
/// testing: https://github.com/meilisearch/meilisearch/issues/4130
#[actix_rt::test]
async fn distinct_search_with_pagination_no_ranking() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.add_documents(documents, Some(DOCUMENT_PRIMARY_KEY)).await;
@@ -222,7 +222,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"0");
snapshot!(format!("{:?}", hits), @r#"[]"#);
snapshot!(format!("{hits:?}"), @r#"[]"#);
snapshot!(response["page"], @"0");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@@ -231,7 +231,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["123456", "789012"]"#);
snapshot!(format!("{hits:?}"), @r#"["123456", "789012"]"#);
snapshot!(response["page"], @"1");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@@ -240,7 +240,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["456789", "987654"]"#);
snapshot!(format!("{hits:?}"), @r#"["456789", "987654"]"#);
snapshot!(response["page"], @"2");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@@ -249,7 +249,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"2");
snapshot!(format!("{:?}", hits), @r#"["234567", "345678"]"#);
snapshot!(format!("{hits:?}"), @r#"["234567", "345678"]"#);
snapshot!(response["page"], @"3");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@@ -258,7 +258,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"0");
snapshot!(format!("{:?}", hits), @r#"[]"#);
snapshot!(format!("{hits:?}"), @r#"[]"#);
snapshot!(response["page"], @"4");
snapshot!(response["totalPages"], @"3");
snapshot!(response["totalHits"], @"6");
@@ -267,7 +267,7 @@ async fn distinct_search_with_pagination_no_ranking() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"3");
snapshot!(format!("{:?}", hits), @r#"["987654", "234567", "345678"]"#);
snapshot!(format!("{hits:?}"), @r#"["987654", "234567", "345678"]"#);
snapshot!(response["page"], @"2");
snapshot!(response["totalPages"], @"2");
snapshot!(response["totalHits"], @"6");
@@ -275,13 +275,13 @@ async fn distinct_search_with_pagination_no_ranking() {
#[actix_rt::test]
async fn distinct_at_search_time() {
let server = Server::new().await;
let index = server.index("tamo");
let server = Server::new_shared();
let index = server.unique_index();
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, Some(DOCUMENT_PRIMARY_KEY)).await;
let (task, _) = index.update_settings_filterable_attributes(json!(["color.main"])).await;
let task = index.wait_task(task.uid()).await;
let task = index.wait_task(task.uid()).await.succeeded();
snapshot!(task, name: "succeed");
fn get_hits(response: &Value) -> Vec<String> {
@@ -299,7 +299,7 @@ async fn distinct_at_search_time() {
let hits = get_hits(&response);
snapshot!(code, @"200 OK");
snapshot!(hits.len(), @"3");
snapshot!(format!("{:?}", hits), @r###"["1", "2", "3"]"###);
snapshot!(format!("{hits:?}"), @r###"["1", "2", "3"]"###);
snapshot!(response["page"], @"1");
snapshot!(response["totalPages"], @"1");
snapshot!(response["totalHits"], @"3");
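The `get_hits` helper these assertions go through is only visible truncated above; a plausible body, inferred from the way the snapshots print primary keys (an assumption, not the verbatim helper):

fn get_hits(response: &Value) -> Vec<String> {
    // project each hit down to its primary key, stringified so numeric and
    // string ids snapshot identically
    response["hits"]
        .as_array()
        .expect("hits should be an array")
        .iter()
        .map(|hit| match &hit["id"] {
            serde_json::Value::String(s) => s.clone(),
            other => other.to_string(),
        })
        .collect()
}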


@@ -708,7 +708,7 @@ async fn filter_invalid_attribute_array() {
|response, code| {
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `many` is not filterable. Available filterable attribute patterns are: `title`.\n1:5 many = Glass",
"message": "Index `[uuid]`: Attribute `many` is not filterable. Available filterable attribute patterns are: `title`.\n1:5 many = Glass",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@@ -729,7 +729,7 @@ async fn filter_invalid_attribute_string() {
|response, code| {
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `many` is not filterable. Available filterable attribute patterns are: `title`.\n1:5 many = Glass",
"message": "Index `[uuid]`: Attribute `many` is not filterable. Available filterable attribute patterns are: `title`.\n1:5 many = Glass",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@@ -886,7 +886,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@@ -912,7 +912,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `=` is not allowed for the attribute `cattos`.\n - Note: allowed operators: OR, AND, NOT, <, >, <=, >=, TO, IS EMPTY, IS NULL, EXISTS.\n - Note: field `cattos` matched rule #0 in `filterableAttributes`\n - Hint: enable equality in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `cattos` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@@ -933,7 +933,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@@ -959,7 +959,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `>` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@@ -985,7 +985,7 @@ async fn search_with_pattern_filter_settings_errors() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r#"
{
"message": "Index `test`: Filter operator `TO` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"message": "Index `[uuid]`: Filter operator `TO` is not allowed for the attribute `doggos.age`.\n - Note: allowed operators: OR, AND, NOT, =, !=, IN, IS EMPTY, IS NULL, EXISTS.\n - Note: field `doggos.age` matched rule #0 in `filterableAttributes`\n - Hint: enable comparison in rule #0 by modifying the features.filter object\n - Hint: prepend another rule matching `doggos.age` with appropriate filter features before rule #0",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@@ -1144,7 +1144,7 @@ async fn search_on_unknown_field() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"message": "Index `[uuid]`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"code": "invalid_search_attributes_to_search_on",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_attributes_to_search_on"
@@ -1165,7 +1165,7 @@ async fn search_on_unknown_field_plus_joker() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"message": "Index `[uuid]`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"code": "invalid_search_attributes_to_search_on",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_attributes_to_search_on"
@@ -1183,7 +1183,7 @@ async fn search_on_unknown_field_plus_joker() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"message": "Index `[uuid]`: Attribute `unknown` is not searchable. Available searchable attributes are: `id, title`.",
"code": "invalid_search_attributes_to_search_on",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_attributes_to_search_on"
@@ -1196,10 +1196,8 @@ async fn search_on_unknown_field_plus_joker() {
#[actix_rt::test]
async fn distinct_at_search_time() {
let server = Server::new().await;
let index = server.index("test");
let (task, _) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let server = Server::new_shared();
let index = server.unique_index();
let (response, _code) =
index.add_documents(json!([{"id": 1, "color": "Doggo", "machin": "Action"}]), None).await;
index.wait_task(response.uid()).await.succeeded();
@@ -1209,7 +1207,7 @@ async fn distinct_at_search_time() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. This index does not have configured filterable attributes.",
"message": "Index `[uuid]`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. This index does not have configured filterable attributes.",
"code": "invalid_search_distinct",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_distinct"
@@ -1224,7 +1222,7 @@ async fn distinct_at_search_time() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. Available filterable attributes patterns are: `color, machin`.",
"message": "Index `[uuid]`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. Available filterable attributes patterns are: `color, machin`.",
"code": "invalid_search_distinct",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_distinct"
@@ -1239,7 +1237,7 @@ async fn distinct_at_search_time() {
snapshot!(code, @"400 Bad Request");
snapshot!(response, @r###"
{
"message": "Index `test`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. Available filterable attributes patterns are: `color, <..hidden-attributes>`.",
"message": "Index `[uuid]`: Attribute `doggo.truc` is not filterable and thus, cannot be used as distinct attribute. Available filterable attributes patterns are: `color, <..hidden-attributes>`.",
"code": "invalid_search_distinct",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_distinct"


@@ -50,13 +50,11 @@ async fn test_settings_documents_indexing_swapping_and_facet_search(
let (task, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.update_settings(settings.clone()).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.facet_search(query.clone()).await;
insta::allow_duplicates! {
@@ -65,21 +63,18 @@ async fn test_settings_documents_indexing_swapping_and_facet_search(
let (task, code) = server.delete_index("test").await;
assert_eq!(code, 202, "{}", task);
let response = server.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
server.wait_task(task.uid()).await.succeeded();
eprintln!("Settings -> Documents -> test");
let index = server.index("test");
let (task, code) = index.update_settings(settings.clone()).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
index.wait_task(task.uid()).await.succeeded();
let (task, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(code, 202, "{}", task);
let response = index.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.facet_search(query.clone()).await;
insta::allow_duplicates! {
@@ -88,14 +83,13 @@ async fn test_settings_documents_indexing_swapping_and_facet_search(
let (task, code) = server.delete_index("test").await;
assert_eq!(code, 202, "{}", task);
let response = server.wait_task(task.uid()).await;
assert!(response.is_success(), "{:?}", response);
server.wait_task(task.uid()).await.succeeded();
}
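Most of the churn in this hunk is the same two-step wait-then-assert pattern collapsing into one chained call. A sketch of what such a `succeeded()` helper can look like on the test suite's task value (hypothetical code; the real helper lives in the tests' common module):

impl Value {
    /// Panics unless the awaited task ended in the `succeeded` status,
    /// then hands the task back for further assertions.
    #[track_caller]
    pub fn succeeded(self) -> Self {
        assert_eq!(self["status"], "succeeded", "task did not succeed: {self:?}");
        self
    }
}

The `#[track_caller]` attribute matters here: on failure, the panic points at the test that waited on the task rather than at the helper.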
#[actix_rt::test]
async fn simple_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["genres"])).await;
@@ -105,20 +99,20 @@ async fn simple_facet_search() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 2);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 2);
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "adventure"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 1);
}
#[actix_rt::test]
async fn simple_facet_search_on_movies() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = json!([
{
@@ -212,23 +206,23 @@ async fn simple_facet_search_on_movies() {
]);
let (response, code) =
index.update_settings_filterable_attributes(json!(["genres", "color"])).await;
assert_eq!(202, code, "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetQuery": "", "facetName": "genres", "q": "" })).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(response["facetHits"], @r###"[{"value":"Action","count":2},{"value":"Adventure","count":3},{"value":"Drama","count":3},{"value":"Fantasy","count":1},{"value":"Romance","count":1},{"value":"Science Fiction","count":1}]"###);
}
#[actix_rt::test]
async fn advanced_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["genres"])).await;
@@ -251,8 +245,8 @@ async fn advanced_facet_search() {
#[actix_rt::test]
async fn more_advanced_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["genres"])).await;
@@ -275,8 +269,8 @@ async fn more_advanced_facet_search() {
#[actix_rt::test]
async fn simple_facet_search_with_max_values() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_faceting(json!({ "maxValuesPerFacet": 1 })).await;
@@ -287,14 +281,14 @@ async fn simple_facet_search_with_max_values() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 1);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 1);
}
#[actix_rt::test]
async fn simple_facet_search_by_count_with_max_values() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index
@@ -309,14 +303,14 @@ async fn simple_facet_search_by_count_with_max_values() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 1);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 1);
}
#[actix_rt::test]
async fn non_filterable_facet_search_error() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (task, _status_code) = index.add_documents(documents, None).await;
@@ -324,17 +318,17 @@ async fn non_filterable_facet_search_error() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "adv"})).await;
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
}
#[actix_rt::test]
async fn facet_search_dont_support_words() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["genres"])).await;
@@ -344,14 +338,14 @@ async fn facet_search_dont_support_words() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "words"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 0);
}
#[actix_rt::test]
async fn simple_facet_search_with_sort_by_count() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
index.update_settings_faceting(json!({ "sortFacetValuesBy": { "*": "count" } })).await;
@@ -362,7 +356,7 @@ async fn simple_facet_search_with_sort_by_count() {
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
let hits = response["facetHits"].as_array().unwrap();
assert_eq!(hits.len(), 2);
assert_eq!(hits[0], json!({ "value": "Action", "count": 3 }));
@@ -371,25 +365,25 @@ async fn simple_facet_search_with_sort_by_count() {
#[actix_rt::test]
async fn add_documents_and_deactivate_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = DOCUMENTS.clone();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index
.update_settings(json!({
"facetSearch": false,
"filterableAttributes": ["genres"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
snapshot!(response, @r###"
{
"message": "The facet search is disabled for this index",
@@ -402,8 +396,8 @@ async fn add_documents_and_deactivate_facet_search() {
#[actix_rt::test]
async fn deactivate_facet_search_and_add_documents() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@@ -411,16 +405,16 @@ async fn deactivate_facet_search_and_add_documents() {
"filterableAttributes": ["genres"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
snapshot!(response, @r###"
{
"message": "The facet search is disabled for this index",
@ -433,8 +427,8 @@ async fn deactivate_facet_search_and_add_documents() {
#[actix_rt::test]
async fn deactivate_facet_search_add_documents_and_activate_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@@ -442,31 +436,31 @@ async fn deactivate_facet_search_add_documents_and_activate_facet_search() {
"filterableAttributes": ["genres"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index
.update_settings(json!({
"facetSearch": true,
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 2);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
async fn deactivate_facet_search_add_documents_and_reset_facet_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@@ -474,25 +468,25 @@ async fn deactivate_facet_search_add_documents_and_reset_facet_search() {
"filterableAttributes": ["genres"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index
.update_settings(json!({
"facetSearch": serde_json::Value::Null,
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetName": "genres", "facetQuery": "a"})).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(dbg!(response)["facetHits"].as_array().unwrap().len(), 2);
assert_eq!(code, 200, "{response}");
assert_eq!(response["facetHits"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
@@ -618,8 +612,8 @@ async fn facet_search_with_filterable_attributes_rules_errors() {
#[actix_rt::test]
async fn distinct_facet_search_on_movies() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = json!([
{
@@ -925,26 +919,26 @@ async fn distinct_facet_search_on_movies() {
]);
let (response, code) =
index.update_settings_filterable_attributes(json!(["genres", "color"])).await;
assert_eq!(202, code, "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.update_settings_distinct_attribute(json!("color")).await;
assert_eq!(202, code, "{:?}", response);
index.wait_task(response.uid()).await;
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, _code) = index.add_documents(documents, None).await;
index.wait_task(response.uid()).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.facet_search(json!({"facetQuery": "blob", "facetName": "genres", "q": "" })).await;
// non-exhaustive facet count is counting 27 documents with the facet query "blob" but there are only 23 documents with a distinct color.
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(response["facetHits"], @r###"[{"value":"Blob","count":27}]"###);
let (response, code) =
index.facet_search(json!({"facetQuery": "blob", "facetName": "genres", "q": "", "exhaustiveFacetCount": true })).await;
// exhaustive facet count is counting 23 documents with the facet query "blob" which is the number of distinct colors.
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(response["facetHits"], @r###"[{"value":"Blob","count":23}]"###);
}
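The two calls above are the whole `exhaustiveFacetCount` story in miniature: identical `facetQuery` and `facetName`, and only the flag decides which counting strategy runs. Side by side (request bodies copied from the calls above):

// fast, approximate: counts 27 matching documents, unaware of `distinct`
let approximate = json!({ "facetQuery": "blob", "facetName": "genres", "q": "" });

// exhaustive: deduplicates by the `color` distinct attribute first, hence 23
let exhaustive = json!({ "facetQuery": "blob", "facetName": "genres", "q": "",
    "exhaustiveFacetCount": true });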


@@ -720,7 +720,7 @@ async fn test_filterable_attributes_priority() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Index `test`: Attribute `doggos.age` is not filterable. Available filterable attribute patterns are: `doggos.*`.\n1:11 doggos.age > 2",
"message": "Index `[uuid]`: Attribute `doggos.age` is not filterable. Available filterable attribute patterns are: `doggos.*`.\n1:11 doggos.age > 2",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
@@ -746,7 +746,7 @@ async fn test_filterable_attributes_priority() {
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Index `test`: Attribute `doggos` is not filterable. Available filterable attribute patterns are: `doggos.*`.\n1:7 doggos EXISTS",
"message": "Index `[uuid]`: Attribute `doggos` is not filterable. Available filterable attribute patterns are: `doggos.*`.\n1:7 doggos EXISTS",
"code": "invalid_search_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_search_filter"

View File

@@ -1,56 +1,14 @@
use meili_snap::{json_string, snapshot};
use meilisearch_types::milli::constants::RESERVED_GEO_FIELD_NAME;
use once_cell::sync::Lazy;
use crate::common::{Server, Value};
use crate::common::shared_index_with_geo_documents;
use crate::json;
use super::test_settings_documents_indexing_swapping_and_search;
static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
"id": 1,
"name": "Taco Truck",
"address": "444 Salsa Street, Burritoville",
"type": "Mexican",
"rating": 9,
"_geo": {
"lat": 34.0522,
"lng": -118.2437
}
},
{
"id": 2,
"name": "La Bella Italia",
"address": "456 Elm Street, Townsville",
"type": "Italian",
"rating": 9,
"_geo": {
"lat": "45.4777599",
"lng": "9.1967508"
}
},
{
"id": 3,
"name": "Crêpe Truck",
"address": "2 Billig Avenue, Rouenville",
"type": "French",
"rating": 10
}
])
});
#[actix_rt::test]
async fn geo_sort_with_geo_strings() {
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["_geo"])).await;
index.update_settings_sortable_attributes(json!(["_geo"])).await;
let (task, _status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded();
let index = shared_index_with_geo_documents().await;
index
.search(
@@ -59,7 +17,7 @@ async fn geo_sort_with_geo_strings() {
"sort": ["_geoPoint(0.0, 0.0):asc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
},
)
.await;
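`shared_index_with_geo_documents()` replaces the setup block that each of these geo tests used to repeat. Judging by the deleted code, its body is roughly the following (a sketch; the real helper lives in the tests' common module, uses the extracted GEO_DOCUMENTS fixture, and presumably caches the index so it is built only once):

async fn shared_index_with_geo_documents() -> Index<'static> {
    let server = Server::new_shared();
    let index = server.unique_index();
    // same settings the deleted per-test setup applied
    index.update_settings_filterable_attributes(json!(["_geo"])).await;
    index.update_settings_sortable_attributes(json!(["_geo"])).await;
    let (task, _code) = index.add_documents(GEO_DOCUMENTS.clone(), None).await;
    index.wait_task(task.uid()).await.succeeded();
    index
}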
@@ -67,14 +25,7 @@ async fn geo_sort_with_geo_strings() {
#[actix_rt::test]
async fn geo_bounding_box_with_string_and_number() {
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
index.update_settings_filterable_attributes(json!(["_geo"])).await;
index.update_settings_sortable_attributes(json!(["_geo"])).await;
let (ret, _code) = index.add_documents(documents, None).await;
index.wait_task(ret.uid()).await.succeeded();
let index = shared_index_with_geo_documents().await;
index
.search(
@@ -82,7 +33,7 @@ async fn geo_bounding_box_with_string_and_number() {
"filter": "_geoBoundingBox([89, 179], [-89, -179])",
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@@ -124,14 +75,7 @@ async fn geo_bounding_box_with_string_and_number() {
#[actix_rt::test]
async fn bug_4640() {
// https://github.com/meilisearch/meilisearch/issues/4640
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.update_settings_filterable_attributes(json!(["_geo"])).await;
let (ret, _code) = index.update_settings_sortable_attributes(json!(["_geo"])).await;
index.wait_task(ret.uid()).await.succeeded();
let index = shared_index_with_geo_documents().await;
// Sort the document with the second one first
index
@@ -140,7 +84,7 @@ async fn bug_4640() {
"sort": ["_geoPoint(45.4777599, 9.1967508):asc"],
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@@ -203,7 +147,7 @@ async fn geo_asc_with_words() {
&json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "geo:asc"]}),
&json!({"q": "jean"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@@ -248,7 +192,7 @@ async fn geo_asc_with_words() {
&json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "geo:asc"]}),
&json!({"q": "bob"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@@ -285,7 +229,7 @@ async fn geo_asc_with_words() {
&json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "geo:asc"]}),
&json!({"q": "intel"}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [
@@ -325,7 +269,7 @@ async fn geo_sort_with_words() {
&json!({"searchableAttributes": ["id", "doggo"], "rankingRules": ["words", "sort"], "sortableAttributes": [RESERVED_GEO_FIELD_NAME]}),
&json!({"q": "jean", "sort": ["_geoPoint(0.0, 0.0):asc"]}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response, { ".processingTimeMs" => "[time]" }), @r###"
{
"hits": [


@@ -2,31 +2,31 @@ use meili_snap::snapshot;
use once_cell::sync::Lazy;
use crate::common::index::Index;
use crate::common::{Server, Value};
use crate::common::{Server, Shared, Value};
use crate::json;
async fn index_with_documents_user_provided<'a>(
server: &'a Server,
server: &'a Server<Shared>,
documents: &Value,
) -> Index<'a> {
let index = server.index("test");
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({ "embedders": {"default": {
"source": "userProvided",
"dimensions": 2}}} ))
.await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
index
}
async fn index_with_documents_hf<'a>(server: &'a Server, documents: &Value) -> Index<'a> {
let index = server.index("test");
async fn index_with_documents_hf<'a>(server: &'a Server<Shared>, documents: &Value) -> Index<'a> {
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({ "embedders": {"default": {
@@ -36,11 +36,11 @@ async fn index_with_documents_hf<'a>(server: &'a Server, documents: &Value) -> I
"documentTemplate": "{{doc.title}}, {{doc.desc}}"
}}} ))
.await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.add_documents(documents.clone(), None).await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
index
}
@@ -76,6 +76,48 @@ static SINGLE_DOCUMENT_VEC: Lazy<Value> = Lazy::new(|| {
}])
});
static TEST_DISTINCT_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
// for query "Captain Marvel" and vector [1.0, 1.0]
json!([
{
"id": 0,
"search": "Captain Planet",
"desc": "#2 for keyword search, #3 for hybrid search",
"_vectors": {
"default": [-1.0, 0.0],
},
"distinct": 0
},
{
"id": 1,
"search": "Captain Marvel",
"desc": "#1 for keyword search, #4 for hybrid search",
"_vectors": {
"default": [-1.0, -1.0],
},
"distinct": 1
},
{
"id": 2,
"search": "Some Captain at least",
"desc": "#3 for keyword search, #1 for hybrid search",
"_vectors": {
"default": [1.0, 1.0],
},
"distinct": 0
},
{
"id": 3,
"search": "Irrelevant Capitaine",
"desc": "#4 for keyword search, #2 for hybrid search",
"_vectors": {
"default": [1.0, 0.0],
},
"distinct": 1
},
])
});
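The hybrid rankings promised in the `desc` fields follow directly from cosine similarity against the query vector [1.0, 1.0]; a quick worked check (standalone arithmetic, not engine code):

fn cosine(a: [f32; 2], b: [f32; 2]) -> f32 {
    let dot = a[0] * b[0] + a[1] * b[1];
    let norm = |v: [f32; 2]| (v[0] * v[0] + v[1] * v[1]).sqrt();
    dot / (norm(a) * norm(b))
}

fn main() {
    let query = [1.0, 1.0];
    // id 2 -> 1.000, id 3 -> 0.707, id 0 -> -0.707, id 1 -> -1.000:
    // exactly the hybrid order #1..#4 claimed in the `desc` fields
    for (id, v) in [(2, [1.0, 1.0]), (3, [1.0, 0.0]), (0, [-1.0, 0.0]), (1, [-1.0, -1.0])] {
        println!("id {id}: {:.3}", cosine(query, v));
    }
}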
static SIMPLE_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
@@ -97,8 +139,8 @@ static SIMPLE_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn simple_search() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(
@@ -130,8 +172,8 @@ async fn simple_search() {
#[actix_rt::test]
async fn limit_offset() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(
@@ -143,8 +185,8 @@ async fn limit_offset() {
snapshot!(response["semanticHitCount"], @"0");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(
@@ -159,8 +201,8 @@ async fn limit_offset() {
#[actix_rt::test]
async fn simple_search_hf() {
let server = Server::new().await;
let index = index_with_documents_hf(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents_hf(server, &SIMPLE_SEARCH_DOCUMENTS).await;
let (response, code) = index
.search_post(
@@ -211,8 +253,8 @@ async fn simple_search_hf() {
#[actix_rt::test]
async fn distribution_shift() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let search = json!({"q": "Captain", "vector": [1.0, 1.0], "showRankingScore": true, "hybrid": {"embedder": "default", "semanticRatio": 1.0}, "retrieveVectors": true});
let (response, code) = index.search_post(search.clone()).await;
@@ -233,7 +275,7 @@ async fn distribution_shift() {
.await;
snapshot!(code, @"202 Accepted");
let response = server.wait_task(response.uid()).await;
let response = server.wait_task(response.uid()).await.succeeded();
snapshot!(response["details"], @r#"{"embedders":{"default":{"distribution":{"mean":0.998,"sigma":0.01}}}}"#);
let (response, code) = index.search_post(search).await;
@@ -243,8 +285,8 @@ async fn distribution_shift() {
#[actix_rt::test]
async fn highlighter() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(json!({"q": "Captain Marvel", "vector": [1.0, 1.0],
@@ -298,8 +340,8 @@ async fn highlighter() {
#[actix_rt::test]
async fn invalid_semantic_ratio() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let (response, code) = index
.search_post(
@@ -370,8 +412,8 @@ async fn invalid_semantic_ratio() {
#[actix_rt::test]
async fn single_document() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SINGLE_DOCUMENT_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SINGLE_DOCUMENT_VEC).await;
let (response, code) = index
.search_post(
@@ -386,8 +428,8 @@ async fn single_document() {
#[actix_rt::test]
async fn query_combination() {
let server = Server::new().await;
let index = index_with_documents_user_provided(&server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &SIMPLE_SEARCH_DOCUMENTS_VEC).await;
// search without query and vector, but with hybrid => still placeholder
let (response, code) = index
@@ -493,10 +535,54 @@ async fn query_combination() {
snapshot!(response["semanticHitCount"], @"0");
}
// see <https://github.com/meilisearch/meilisearch/issues/5526>
#[actix_rt::test]
async fn distinct_is_applied() {
let server = Server::new_shared();
let index = index_with_documents_user_provided(server, &TEST_DISTINCT_DOCUMENTS).await;
let (response, code) = index.update_settings(json!({ "distinctAttribute": "distinct" } )).await;
assert_eq!(202, code, "{:?}", response);
index.wait_task(response.uid()).await.succeeded();
// pure keyword
let (response, code) = index
.search_post(
json!({"q": "Captain Marvel", "vector": [1.0, 1.0], "hybrid": {"semanticRatio": 0.0, "embedder": "default"}}),
)
.await;
snapshot!(code, @"200 OK");
snapshot!(response["hits"], @r###"[{"id":1,"search":"Captain Marvel","desc":"#1 for keyword search, #4 for hybrid search","distinct":1},{"id":0,"search":"Captain Planet","desc":"#2 for keyword search, #3 for hybrid search","distinct":0}]"###);
snapshot!(response["semanticHitCount"], @"null");
snapshot!(response["estimatedTotalHits"], @"2");
// pure semantic
let (response, code) = index
.search_post(
json!({"q": "Captain Marvel", "vector": [1.0, 1.0], "hybrid": {"semanticRatio": 1.0, "embedder": "default"}}),
)
.await;
snapshot!(code, @"200 OK");
snapshot!(response["hits"], @r###"[{"id":2,"search":"Some Captain at least","desc":"#3 for keyword search, #1 for hybrid search","distinct":0},{"id":3,"search":"Irrelevant Capitaine","desc":"#4 for keyword search, #2 for hybrid search","distinct":1}]"###);
snapshot!(response["semanticHitCount"], @"2");
snapshot!(response["estimatedTotalHits"], @"2");
// hybrid
let (response, code) = index
.search_post(
json!({"q": "Captain Marvel", "vector": [1.0, 1.0], "hybrid": {"semanticRatio": 0.5, "embedder": "default"}}),
)
.await;
snapshot!(code, @"200 OK");
snapshot!(response["hits"], @r###"[{"id":2,"search":"Some Captain at least","desc":"#3 for keyword search, #1 for hybrid search","distinct":0},{"id":1,"search":"Captain Marvel","desc":"#1 for keyword search, #4 for hybrid search","distinct":1}]"###);
snapshot!(response["semanticHitCount"], @"1");
snapshot!(response["estimatedTotalHits"], @"2");
}
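In all three modes the distinct attribute leaves one representative per distinct value, which is why `estimatedTotalHits` is pinned at 2; the semantic ratio only decides which ranking supplies each representative. The deduplication itself is just "keep the first hit per distinct value", as a toy model (illustrative only, not engine code):

use std::collections::HashSet;

fn dedup_by_distinct(order: &[usize], distinct_of: &[usize]) -> Vec<usize> {
    let mut seen = HashSet::new();
    order.iter().copied().filter(|&id| seen.insert(distinct_of[id])).collect()
}

fn main() {
    let distinct_of = [0, 1, 0, 1]; // the `distinct` field per document id above
    assert_eq!(dedup_by_distinct(&[1, 0, 2, 3], &distinct_of), vec![1, 0]); // pure keyword
    assert_eq!(dedup_by_distinct(&[2, 3, 0, 1], &distinct_of), vec![2, 3]); // pure semantic
}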
#[actix_rt::test]
async fn retrieve_vectors() {
let server = Server::new().await;
let index = index_with_documents_hf(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents_hf(server, &SIMPLE_SEARCH_DOCUMENTS).await;
let (response, code) = index
.search_post(
@@ -546,7 +632,7 @@ async fn retrieve_vectors() {
let (response, code) = index
.update_settings(json!({ "displayedAttributes": ["id", "title", "desc", "_vectors"]} ))
.await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index
@@ -596,7 +682,7 @@ async fn retrieve_vectors() {
// remove `_vectors` from displayed attributes
let (response, code) =
index.update_settings(json!({ "displayedAttributes": ["id", "title", "desc"]} )).await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index


@@ -89,9 +89,9 @@ static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn simple_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
index
.update_settings(
@@ -196,9 +196,9 @@ async fn simple_search() {
#[actix_rt::test]
async fn force_locales() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@@ -211,10 +211,10 @@ }),
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -274,9 +274,9 @@ async fn force_locales() {
#[actix_rt::test]
async fn force_locales_with_pattern() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@@ -289,10 +289,10 @@ }),
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -352,9 +352,9 @@ async fn force_locales_with_pattern() {
#[actix_rt::test]
async fn force_locales_with_pattern_nested() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = NESTED_DOCUMENTS.clone();
let (response, _) = index
.update_settings(json!({
@@ -365,10 +365,10 @@ force_locales_with_pattern_nested() {
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -423,9 +423,9 @@ }
}
#[actix_rt::test]
async fn force_different_locales_with_pattern() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@@ -440,10 +440,10 @@ }),
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -499,9 +499,9 @@ async fn force_different_locales_with_pattern() {
#[actix_rt::test]
async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@@ -518,10 +518,10 @@ }),
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -577,9 +577,9 @@ async fn auto_infer_locales_at_search_with_attributes_to_search_on() {
#[actix_rt::test]
async fn auto_infer_locales_at_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(
@@ -592,10 +592,10 @@ }),
}),
)
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -676,9 +676,9 @@ async fn auto_infer_locales_at_search() {
#[actix_rt::test]
async fn force_different_locales_with_pattern_nested() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = NESTED_DOCUMENTS.clone();
let (response, _) = index
.update_settings(json!({
@@ -691,10 +691,10 @@ ]
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -774,9 +774,9 @@ async fn force_different_locales_with_pattern_nested() {
#[actix_rt::test]
async fn settings_change() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = NESTED_DOCUMENTS.clone();
let (task, _status_code) = index.add_documents(documents, None).await;
index.wait_task(task.uid()).await.succeeded();
@@ -789,10 +789,10 @@ ]
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 1,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -852,10 +852,10 @@ ]
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 2,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -906,9 +906,9 @@ async fn settings_change() {
#[actix_rt::test]
async fn invalid_locales() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
index
.update_settings(
@@ -945,9 +945,9 @@ async fn invalid_locales() {
#[actix_rt::test]
async fn invalid_localized_attributes_rules() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let (response, _) = index
.update_settings(json!({
"localizedAttributes": [
@@ -1015,19 +1015,19 @@ async fn invalid_localized_attributes_rules() {
#[actix_rt::test]
async fn simple_facet_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(json!({
"filterableAttributes": ["name_en", "name_ja", "name_zh"],
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -1073,9 +1073,9 @@ async fn simple_facet_search() {
#[actix_rt::test]
async fn facet_search_with_localized_attributes() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = DOCUMENTS.clone();
let (response, _) = index
.update_settings(json!({
@@ -1086,10 +1086,10 @@ ]
]
}))
.await;
snapshot!(response, @r###"
snapshot!(json_string!(response, { ".taskUid" => "[task_uid]", ".enqueuedAt" => "[date]" }), @r###"
{
"taskUid": 0,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@@ -1146,9 +1146,9 @@ async fn facet_search_with_localized_attributes() {
#[actix_rt::test]
async fn swedish_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = json!([
{"id": "tra1-1", "product": "trä"},
{"id": "tra2-1", "product": "traktor"},
@@ -1269,9 +1269,9 @@ async fn swedish_search() {
#[actix_rt::test]
async fn german_search() {
let server = Server::new().await;
let server = Server::new_shared();
let index = server.unique_index();
let index = server.index("test");
let documents = json!([
{"id": 1, "product": "Interkulturalität"},
{"id": 2, "product": "Wissensorganisation"},

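A shared server makes task uids and enqueue timestamps run-dependent, which is why every inline snapshot in this file now routes the response through `json_string!` with redactions first (the uuid-named unique index is likewise redacted to `[uuid]`). The pattern, isolated from the hunks above:

// rewrite the unstable fields to fixed placeholders before snapshotting
snapshot!(json_string!(response, {
    ".taskUid" => "[task_uid]",
    ".enqueuedAt" => "[date]"
}), @r###"
{
  "taskUid": "[task_uid]",
  "indexUid": "[uuid]",
  "status": "enqueued",
  "type": "settingsUpdate",
  "enqueuedAt": "[date]"
}
"###);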

@@ -2,11 +2,11 @@ use meili_snap::snapshot;
use once_cell::sync::Lazy;
use crate::common::index::Index;
use crate::common::{Server, Value};
use crate::common::{Server, Shared, Value};
use crate::json;
async fn index_with_documents<'a>(server: &'a Server, documents: &Value) -> Index<'a> {
let index = server.index("test");
async fn index_with_documents<'a>(server: &'a Server<Shared>, documents: &Value) -> Index<'a> {
let index = server.unique_index();
let (task, _status_code) = index.add_documents(documents.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@@ -48,8 +48,8 @@ static SIMPLE_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn simple_search() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(json!({"q": "Captain Marvel", "matchingStrategy": "last", "attributesToRetrieve": ["id"]}), |response, code| {
@@ -75,8 +75,8 @@ async fn simple_search() {
#[actix_rt::test]
async fn search_with_typo() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(json!({"q": "Capitain Marvel", "matchingStrategy": "last", "attributesToRetrieve": ["id"]}), |response, code| {
@@ -102,8 +102,8 @@ async fn search_with_typo() {
#[actix_rt::test]
async fn search_with_unknown_word() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(json!({"q": "Captain Supercopter Marvel", "matchingStrategy": "last", "attributesToRetrieve": ["id"]}), |response, code| {

File diff suppressed because it is too large.


@@ -2296,6 +2296,7 @@ async fn error_remote_500_once() {
}
#[actix_rt::test]
#[ignore]
async fn error_remote_timeout() {
let ms0 = Server::new().await;
let ms1 = Server::new().await;


@@ -7,7 +7,7 @@ async fn default_search_should_return_estimated_total_hit() {
let index = shared_index_with_documents().await;
index
.search(json!({}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert!(response.get("estimatedTotalHits").is_some());
assert!(response.get("limit").is_some());
assert!(response.get("offset").is_some());
@@ -25,7 +25,7 @@ async fn simple_search() {
let index = shared_index_with_documents().await;
index
.search(json!({"page": 1}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
assert!(response.get("totalHits").is_some());
assert_eq!(response["page"], 1);
@@ -44,7 +44,7 @@ async fn page_zero_should_not_return_any_result() {
let index = shared_index_with_documents().await;
index
.search(json!({"page": 0}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 0);
assert!(response.get("totalHits").is_some());
assert_eq!(response["page"], 0);
@@ -58,7 +58,7 @@ async fn hits_per_page_1() {
let index = shared_index_with_documents().await;
index
.search(json!({"hitsPerPage": 1}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
assert_eq!(response["totalHits"], 5);
assert_eq!(response["page"], 1);
@@ -72,7 +72,7 @@ async fn hits_per_page_0_should_not_return_any_result() {
let index = shared_index_with_documents().await;
index
.search(json!({"hitsPerPage": 0}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["hits"].as_array().unwrap().len(), 0);
assert_eq!(response["totalHits"], 5);
assert_eq!(response["page"], 1);
@@ -126,7 +126,7 @@ async fn ensure_placeholder_search_hit_count_valid() {
for page in 0..=4 {
index
.search(json!({"page": page, "hitsPerPage": 1}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["totalHits"], 4);
assert_eq!(response["totalPages"], 4);
})

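The page/hitsPerPage assertions above reduce to one invariant: `totalPages` is the ceiling of `totalHits` over `hitsPerPage`, and out-of-range pages still report the true totals. As a sketch (the zero-divisor guard is an assumption; the tests above only pin `totalHits` for that case):

fn total_pages(total_hits: u64, hits_per_page: u64) -> u64 {
    if hits_per_page == 0 { 0 } else { total_hits.div_ceil(hits_per_page) }
}

fn main() {
    assert_eq!(total_pages(4, 1), 4); // ensure_placeholder_search_hit_count_valid
    assert_eq!(total_pages(6, 2), 3); // the distinct pagination tests earlier
}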

@@ -2,11 +2,11 @@ use meili_snap::{json_string, snapshot};
use once_cell::sync::Lazy;
use crate::common::index::Index;
use crate::common::{Server, Value};
use crate::common::{Server, Shared, Value};
use crate::json;
async fn index_with_documents<'a>(server: &'a Server, documents: &Value) -> Index<'a> {
let index = server.index("test");
async fn index_with_documents<'a>(server: &'a Server<Shared>, documents: &Value) -> Index<'a> {
let index = server.unique_index();
let (task, _code) = index.add_documents(documents.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
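The signature change above captures the whole migration pattern of this compare: one shared server per test binary plus a uniquely named index per test. A hypothetical, self-contained sketch of that pattern (not the harness's actual code, which lives in tests/common; assumes the tokio `sync`/`macros`/`rt` and uuid `v4` crate features):

use tokio::sync::OnceCell;
use uuid::Uuid;

struct Server; // stand-in for the harness type

impl Server {
    async fn new() -> Server {
        Server // the real constructor boots a Meilisearch instance
    }

    // One shared instance per test binary, lazily initialized on first use.
    async fn new_shared() -> &'static Server {
        static SHARED: OnceCell<Server> = OnceCell::const_new();
        SHARED.get_or_init(|| async { Server::new().await }).await
    }

    // Tests stay isolated by addressing their own randomly named index.
    fn unique_index_uid(&self) -> String {
        Uuid::new_v4().to_string()
    }
}

#[tokio::main]
async fn main() {
    let server = Server::new_shared().await;
    println!("index uid: {}", server.unique_index_uid());
}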
@ -34,8 +34,8 @@ static SIMPLE_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn simple_search_on_title() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// simple search should return 2 documents (ids: 2 and 3).
index
@ -51,8 +51,8 @@ async fn simple_search_on_title() {
#[actix_rt::test]
async fn search_no_searchable_attribute_set() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(
@ -93,8 +93,8 @@ async fn search_no_searchable_attribute_set() {
#[actix_rt::test]
async fn search_on_all_attributes() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
index
.search(json!({"q": "Captain Marvel", "attributesToSearchOn": ["*"]}), |response, code| {
@ -106,8 +106,8 @@ async fn search_on_all_attributes() {
#[actix_rt::test]
async fn search_on_all_attributes_restricted_set() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
let (task, _status_code) = index.update_settings_searchable_attributes(json!(["title"])).await;
index.wait_task(task.uid()).await.succeeded();
@ -121,8 +121,8 @@ async fn search_on_all_attributes_restricted_set() {
#[actix_rt::test]
async fn simple_prefix_search_on_title() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// simple search should return 2 documents (ids: 2 and 3).
index
@ -135,8 +135,8 @@ async fn simple_prefix_search_on_title() {
#[actix_rt::test]
async fn simple_search_on_title_matching_strategy_all() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// simple search matching strategy all should only return 1 document (ids: 2).
index
.search(json!({"q": "Captain Marvel", "attributesToSearchOn": ["title"], "matchingStrategy": "all"}), |response, code| {
@ -148,8 +148,8 @@ async fn simple_search_on_title_matching_strategy_all() {
#[actix_rt::test]
async fn simple_search_on_no_field() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// simple search on no field shouldn't return any document.
index
.search(json!({"q": "Captain Marvel", "attributesToSearchOn": []}), |response, code| {
@ -161,8 +161,8 @@ async fn simple_search_on_no_field() {
#[actix_rt::test]
async fn word_ranking_rule_order() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
// Document 3 should appear before document 2.
index
@ -189,8 +189,8 @@ async fn word_ranking_rule_order() {
#[actix_rt::test]
async fn word_ranking_rule_order_exact_words() {
let server = Server::new().await;
let index = index_with_documents(&server, &SIMPLE_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &SIMPLE_SEARCH_DOCUMENTS).await;
let (task, _status_code) = index
.update_settings_typo_tolerance(json!({"disableOnWords": ["Captain", "Marvel"]}))
.await;
@ -221,9 +221,9 @@ async fn word_ranking_rule_order_exact_words() {
#[actix_rt::test]
async fn typo_ranking_rule_order() {
let server = Server::new().await;
let server = Server::new_shared();
let index = index_with_documents(
&server,
server,
&json!([
{
"title": "Capitain Marivel",
@ -260,9 +260,9 @@ async fn typo_ranking_rule_order() {
#[actix_rt::test]
async fn attributes_ranking_rule_order() {
let server = Server::new().await;
let server = Server::new_shared();
let index = index_with_documents(
&server,
server,
&json!([
{
"title": "Captain Marvel",
@ -301,9 +301,9 @@ async fn attributes_ranking_rule_order() {
#[actix_rt::test]
async fn exactness_ranking_rule_order() {
let server = Server::new().await;
let server = Server::new_shared();
let index = index_with_documents(
&server,
server,
&json!([
{
"title": "Captain Marvel",
@ -340,9 +340,9 @@ async fn exactness_ranking_rule_order() {
#[actix_rt::test]
async fn search_on_exact_field() {
let server = Server::new().await;
let server = Server::new_shared();
let index = index_with_documents(
&server,
server,
&json!([
{
"title": "Captain Marvel",
@ -359,7 +359,7 @@ async fn search_on_exact_field() {
let (response, code) =
index.update_settings_typo_tolerance(json!({ "disableOnAttributes": ["exact"] })).await;
assert_eq!(202, code, "{:?}", response);
assert_eq!(202, code, "{response:?}");
index.wait_task(response.uid()).await.succeeded();
// Searching on an exact attribute should only return the document matching without typo.
index
@ -372,7 +372,7 @@ async fn search_on_exact_field() {
#[actix_rt::test]
async fn phrase_search_on_title() {
let server = Server::new().await;
let server = Server::new_shared();
let documents = json!([
{ "id": 8, "desc": "Document Review", "title": "Document Review Specialist II" },
{ "id": 5, "desc": "Document Review", "title": "Document Review Attorney" },
@ -383,7 +383,7 @@ async fn phrase_search_on_title() {
{ "id": 7, "desc": "Document Review", "title": "Document Review Specialist II" },
{ "id": 6, "desc": "Document Review", "title": "Document Review (Entry Level)" }
]);
let index = index_with_documents(&server, &documents).await;
let index = index_with_documents(server, &documents).await;
index
.search(
@ -460,8 +460,8 @@ static NESTED_SEARCH_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
#[actix_rt::test]
async fn nested_search_on_title_with_prefix_wildcard() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Wildcard should match to 'details.' attribute
index
@ -486,8 +486,8 @@ async fn nested_search_on_title_with_prefix_wildcard() {
#[actix_rt::test]
async fn nested_search_with_suffix_wildcard() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Wildcard should match to any attribute inside 'details.'
// It's worth noting the difference between 'details.*' and '*.title'
@ -553,8 +553,8 @@ async fn nested_search_with_suffix_wildcard() {
#[actix_rt::test]
async fn nested_search_on_title_restricted_set_with_suffix_wildcard() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
let (task, _status_code) =
index.update_settings_searchable_attributes(json!(["details.title"])).await;
index.wait_task(task.uid()).await.succeeded();
@ -581,8 +581,8 @@ async fn nested_search_on_title_restricted_set_with_suffix_wildcard() {
#[actix_rt::test]
async fn nested_search_no_searchable_attribute_set_with_any_wildcard() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
index
.search(
@ -632,8 +632,8 @@ async fn nested_search_no_searchable_attribute_set_with_any_wildcard() {
#[actix_rt::test]
async fn nested_prefix_search_on_title_with_prefix_wildcard() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Nested prefix search with prefix wildcard should return 2 documents (ids: 2 and 3).
index
@ -658,8 +658,8 @@ async fn nested_prefix_search_on_title_with_prefix_wildcard() {
#[actix_rt::test]
async fn nested_prefix_search_on_details_with_suffix_wildcard() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
index
.search(
@ -686,8 +686,8 @@ async fn nested_prefix_search_on_details_with_suffix_wildcard() {
#[actix_rt::test]
async fn nested_prefix_search_on_weaknesses_with_suffix_wildcard() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Wildcard search on nested weaknesses should return 2 documents (ids: 1 and 3)
index
@ -712,8 +712,8 @@ async fn nested_prefix_search_on_weaknesses_with_suffix_wildcard() {
#[actix_rt::test]
async fn nested_search_on_title_matching_strategy_all() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Nested search matching strategy all should only return 1 document (ids: 3)
index
@ -735,8 +735,8 @@ async fn nested_search_on_title_matching_strategy_all() {
#[actix_rt::test]
async fn nested_attributes_ranking_rule_order_with_prefix_wildcard() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Document 3 should appear before documents 1 and 2
index
@ -766,8 +766,8 @@ async fn nested_attributes_ranking_rule_order_with_prefix_wildcard() {
#[actix_rt::test]
async fn nested_attributes_ranking_rule_order_with_suffix_wildcard() {
let server = Server::new().await;
let index = index_with_documents(&server, &NESTED_SEARCH_DOCUMENTS).await;
let server = Server::new_shared();
let index = index_with_documents(server, &NESTED_SEARCH_DOCUMENTS).await;
// Document 3 should appear before documents 1 and 2
index


@ -4,7 +4,7 @@ source: crates/meilisearch/tests/search/distinct.rs
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "tamo",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,

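The .snap change above follows from the unique index uids: literal names such as `tamo` become the `[uuid]` placeholder. `meili_snap` builds on insta, whose redaction mechanism looks roughly like this (a sketch assuming insta's `json` and `redactions` features, not the exact harness helper):

use insta::assert_json_snapshot;
use serde_json::json;

#[test]
fn task_snapshot_is_stable() {
    let task = json!({
        "uid": 7,
        "batchUid": 7,
        "indexUid": "0b29…-random-uuid",
        "status": "succeeded",
    });
    // Redact run-dependent values so the stored snapshot never churns.
    assert_json_snapshot!(task, {
        ".uid" => "[uid]",
        ".batchUid" => "[batch_uid]",
        ".indexUid" => "[uuid]",
    });
}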

@ -3,8 +3,8 @@ use crate::json;
#[actix_rt::test]
async fn set_and_reset_distinct_attribute() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task1, _code) = index.update_settings(json!({ "distinctAttribute": "test"})).await;
index.wait_task(task1.uid()).await.succeeded();
@ -24,8 +24,8 @@ async fn set_and_reset_distinct_attribute() {
#[actix_rt::test]
async fn set_and_reset_distinct_attribute_with_dedicated_route() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (update_task1, _code) = index.update_distinct_attribute(json!("test")).await;
index.wait_task(update_task1.uid()).await.succeeded();


@ -11,59 +11,62 @@ macro_rules! test_setting_routes {
#[actix_rt::test]
async fn get_unexisting_index() {
let server = Server::new().await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (_response, code) = server.service.get(url).await;
assert_eq!(code, 404);
}
#[actix_rt::test]
async fn update_unexisting_index() {
let server = Server::new().await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.$update_verb(url, serde_json::Value::Null.into()).await;
assert_eq!(code, 202, "{}", response);
server.index("").wait_task(0).await;
let (response, code) = server.index("test").get().await;
assert_eq!(code, 200, "{}", response);
}
#[actix_rt::test]
async fn delete_unexisting_index() {
let server = Server::new().await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (_, code) = server.service.delete(url).await;
assert_eq!(code, 202);
let response = server.index("").wait_task(0).await;
assert_eq!(response["status"], "failed");
}
#[actix_rt::test]
async fn get_default() {
let server = Server::new().await;
let index = server.index("test");
let (response, code) = index.create(None).await;
assert_eq!(code, 202, "{}", response);
index.wait_task(0).await;
let url = format!("/indexes/test/settings/{}",
let server = Server::new_shared();
let index_name = uuid::Uuid::new_v4().to_string();
let url = format!("/indexes/{index_name}/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.get(url).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 404, "{response}");
}
#[actix_rt::test]
async fn update_unexisting_index() {
let server = Server::new_shared();
let index_name = uuid::Uuid::new_v4().to_string();
let url = format!("/indexes/{index_name}/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.$update_verb(url, serde_json::Value::Null.into()).await;
assert_eq!(code, 202, "{response}");
let (response, code) = server.service.get(format!("/indixes/{index_name}")).await;
assert_eq!(code, 404, "{response}");
}
#[actix_rt::test]
async fn delete_unexisting_index() {
let server = Server::new_shared();
let index_name = uuid::Uuid::new_v4().to_string();
let url = format!("/indexes/{index_name}/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.delete(url).await;
assert_eq!(code, 202, "{response}");
let (response, code) = server.service.get(format!("/indixes/{index_name}")).await;
assert_eq!(code, 404, "{response}");
}
#[actix_rt::test]
async fn get_default() {
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index.create(None).await;
assert_eq!(code, 202, "{response}");
index.wait_task(response.uid()).await.succeeded();
let url = format!("/indexes/{}/settings/{}",
index.uid,
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.get(url).await;
assert_eq!(code, 200, "{response}");
let expected = crate::json!($default_value);
assert_eq!(expected, response);
}
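For reference, the route-building trick the macro uses, `stringify!` plus a character map to turn `ranking_rules` into `ranking-rules`, in plain-function form:

fn route_segment(setting: &str) -> String {
    // Settings identifiers are snake_case in Rust but kebab-case in the URL.
    setting.chars().map(|c| if c == '_' { '-' } else { c }).collect()
}

fn main() {
    assert_eq!(route_segment("ranking_rules"), "ranking-rules");
    let url = format!("/indexes/{}/settings/{}", "my-index-uid", route_segment("ranking_rules"));
    assert_eq!(url, "/indexes/my-index-uid/settings/ranking-rules");
}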
@ -185,15 +188,16 @@ test_setting_routes!(
#[actix_rt::test]
async fn get_settings_unexisting_index() {
let server = Server::new().await;
let (response, code) = server.index("test").settings().await;
assert_eq!(code, 404, "{}", response)
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index.settings().await;
assert_eq!(code, 404, "{response}")
}
#[actix_rt::test]
async fn get_settings() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, _code) = index.create(None).await;
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.settings().await;
@ -237,9 +241,8 @@ async fn get_settings() {
#[actix_rt::test]
async fn secrets_are_hidden_in_settings() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, _code) = index.create(None).await;
index.wait_task(response.uid()).await.succeeded();
@ -259,11 +262,11 @@ async fn secrets_are_hidden_in_settings() {
.await;
meili_snap::snapshot!(code, @"202 Accepted");
meili_snap::snapshot!(meili_snap::json_string!(response, { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
meili_snap::snapshot!(meili_snap::json_string!(response, { ".taskUid" => "[task_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r###"
{
"taskUid": 1,
"indexUid": "test",
"taskUid": "[task_uid]",
"indexUid": "[uuid]",
"status": "enqueued",
"type": "settingsUpdate",
"enqueuedAt": "[date]"
@ -272,7 +275,7 @@ async fn secrets_are_hidden_in_settings() {
let settings_update_uid = response.uid();
index.wait_task(settings_update_uid).await;
index.wait_task(settings_update_uid).await.succeeded();
let (response, code) = index.settings().await;
meili_snap::snapshot!(code, @"200 OK");
@ -360,16 +363,16 @@ async fn secrets_are_hidden_in_settings() {
#[actix_rt::test]
async fn error_update_settings_unknown_field() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (_response, code) = index.update_settings(json!({"foo": 12})).await;
assert_eq!(code, 400);
}
#[actix_rt::test]
async fn test_partial_update() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.update_settings(json!({"displayedAttributes": ["foo"]})).await;
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.settings().await;
@ -388,20 +391,18 @@ async fn test_partial_update() {
#[actix_rt::test]
async fn error_delete_settings_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.delete_settings().await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "failed");
index.wait_task(task.uid()).await.failed();
}
#[actix_rt::test]
async fn reset_all_settings() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let documents = json!([
{
@ -413,7 +414,6 @@ async fn reset_all_settings() {
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
assert_eq!(response["taskUid"], 0);
index.wait_task(response.uid()).await.succeeded();
let (update_task,_status_code) = index
@ -446,17 +446,15 @@ async fn reset_all_settings() {
#[actix_rt::test]
async fn update_setting_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, code) = index.update_settings(json!({})).await;
assert_eq!(code, 202);
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task.uid()).await.succeeded();
let (_response, code) = index.get().await;
assert_eq!(code, 200);
let (task, _status_code) = index.delete_settings().await;
let response = index.wait_task(task.uid()).await;
assert_eq!(response["status"], "succeeded");
index.wait_task(task.uid()).await.succeeded();
}
#[actix_rt::test]
@ -477,8 +475,8 @@ async fn error_update_setting_unexisting_index_invalid_uid() {
#[actix_rt::test]
async fn error_set_invalid_ranking_rules() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index.create(None).await;
let (response, code) = index.update_settings(json!({ "rankingRules": [ "manyTheFish"]})).await;
@ -495,8 +493,8 @@ async fn error_set_invalid_ranking_rules() {
#[actix_rt::test]
async fn set_and_reset_distinct_attribute_with_dedicated_route() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index.update_distinct_attribute(json!("test")).await;
index.wait_task(task.uid()).await.succeeded();
@ -516,8 +514,8 @@ async fn set_and_reset_distinct_attribute_with_dedicated_route() {
#[actix_rt::test]
async fn granular_filterable_attributes() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index.create(None).await;
let (response, code) =
@ -535,7 +533,7 @@ async fn granular_filterable_attributes() {
index.wait_task(response.uid()).await.succeeded();
let (response, code) = index.settings().await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
snapshot!(json_string!(response["filterableAttributes"]), @r###"
[
{


@ -26,8 +26,8 @@ static DOCUMENTS: Lazy<crate::common::Value> = Lazy::new(|| {
#[actix_rt::test]
async fn attribute_scale_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@ -38,7 +38,7 @@ async fn attribute_scale_search() {
"rankingRules": ["words", "typo", "proximity"],
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
// the expected order is [1, 3, 2] instead of [3, 1, 2]
@ -99,8 +99,8 @@ async fn attribute_scale_search() {
#[actix_rt::test]
async fn attribute_scale_phrase_search() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@ -167,8 +167,8 @@ async fn attribute_scale_phrase_search() {
#[actix_rt::test]
async fn word_scale_set_and_reset() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@ -282,8 +282,8 @@ async fn word_scale_set_and_reset() {
#[actix_rt::test]
async fn attribute_scale_default_ranking_rules() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.add_documents(DOCUMENTS.clone(), None).await;
index.wait_task(task.uid()).await.succeeded();
@ -293,7 +293,7 @@ async fn attribute_scale_default_ranking_rules() {
"proximityPrecision": "byAttribute"
}))
.await;
assert_eq!("202", code.as_str(), "{:?}", response);
assert_eq!("202", code.as_str(), "{response:?}");
index.wait_task(response.uid()).await.succeeded();
// the expected order is [3, 1, 2]


@ -5,8 +5,8 @@ use crate::json;
#[actix_rt::test]
async fn set_and_reset() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _code) = index
.update_settings(json!({
@ -70,8 +70,8 @@ async fn set_and_search() {
},
]);
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (add_task, _status_code) = index.add_documents(documents, None).await;
index.wait_task(add_task.uid()).await.succeeded();
@ -224,8 +224,8 @@ async fn advanced_synergies() {
},
]);
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (add_task, _status_code) = index.add_documents(documents, None).await;
index.wait_task(add_task.uid()).await.succeeded();


@ -6,11 +6,11 @@ use crate::json;
#[actix_rt::test]
async fn similar_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let expected_response = json!({
"message": "Index `test` not found.",
"message": format!("Index `{}` not found.", index.uid),
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -26,12 +26,12 @@ async fn similar_unexisting_index() {
#[actix_rt::test]
async fn similar_unexisting_parameter() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
index
.similar(json!({"id": 287947, "marin": "hello"}), |response, code| {
assert_eq!(code, 400, "{}", response);
assert_eq!(code, 400, "{response}");
assert_eq!(response["code"], "bad_request");
})
.await;
@ -39,8 +39,8 @@ async fn similar_unexisting_parameter() {
#[actix_rt::test]
async fn similar_bad_id() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -53,7 +53,7 @@ async fn similar_bad_id() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) = index.similar_post(json!({"id": ["doggo"], "embedder": "manual"})).await;
snapshot!(code, @"400 Bad Request");
@ -69,8 +69,8 @@ async fn similar_bad_id() {
#[actix_rt::test]
async fn similar_bad_ranking_score_threshold() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -83,7 +83,7 @@ async fn similar_bad_ranking_score_threshold() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) = index.similar_post(json!({"rankingScoreThreshold": ["doggo"]})).await;
snapshot!(code, @"400 Bad Request");
@ -99,8 +99,8 @@ async fn similar_bad_ranking_score_threshold() {
#[actix_rt::test]
async fn similar_invalid_ranking_score_threshold() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -113,7 +113,7 @@ async fn similar_invalid_ranking_score_threshold() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) = index.similar_post(json!({"rankingScoreThreshold": 42})).await;
snapshot!(code, @"400 Bad Request");
@ -129,8 +129,8 @@ async fn similar_invalid_ranking_score_threshold() {
#[actix_rt::test]
async fn similar_invalid_id() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -143,7 +143,7 @@ async fn similar_invalid_id() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.similar_post(json!({"id": "http://invalid-docid/", "embedder": "manual"})).await;
@ -160,8 +160,8 @@ async fn similar_invalid_id() {
#[actix_rt::test]
async fn similar_not_found_id() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -174,7 +174,7 @@ async fn similar_not_found_id() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.similar_post(json!({"id": "definitely-doesnt-exist", "embedder": "manual"})).await;
@ -191,8 +191,8 @@ async fn similar_not_found_id() {
#[actix_rt::test]
async fn similar_bad_offset() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -205,7 +205,7 @@ async fn similar_bad_offset() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.similar_post(json!({"id": 287947, "offset": "doggo", "embedder": "manual"})).await;
@ -233,8 +233,8 @@ async fn similar_bad_offset() {
#[actix_rt::test]
async fn similar_bad_limit() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -247,7 +247,7 @@ async fn similar_bad_limit() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let (response, code) =
index.similar_post(json!({"id": 287947, "limit": "doggo", "embedder": "manual"})).await;
@ -277,8 +277,8 @@ async fn similar_bad_limit() {
async fn similar_bad_filter() {
// Since a filter is deserialized as a json Value it will never fail to deserialize.
// Thus the error message is not generated by deserr but written by us.
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -291,7 +291,7 @@ async fn similar_bad_filter() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
snapshot!(code, @"202 Accepted");
@ -316,8 +316,8 @@ async fn similar_bad_filter() {
#[actix_rt::test]
async fn filter_invalid_syntax_object() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -330,7 +330,7 @@ async fn filter_invalid_syntax_object() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -354,8 +354,8 @@ async fn filter_invalid_syntax_object() {
#[actix_rt::test]
async fn filter_invalid_syntax_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -368,7 +368,7 @@ async fn filter_invalid_syntax_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -392,8 +392,8 @@ async fn filter_invalid_syntax_array() {
#[actix_rt::test]
async fn filter_invalid_syntax_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -406,7 +406,7 @@ async fn filter_invalid_syntax_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -432,8 +432,8 @@ async fn filter_invalid_syntax_string() {
#[actix_rt::test]
async fn filter_invalid_attribute_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -446,7 +446,7 @@ async fn filter_invalid_attribute_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -473,8 +473,8 @@ async fn filter_invalid_attribute_array() {
#[actix_rt::test]
async fn filter_invalid_attribute_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -487,7 +487,7 @@ async fn filter_invalid_attribute_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -514,8 +514,8 @@ async fn filter_invalid_attribute_string() {
#[actix_rt::test]
async fn filter_reserved_geo_attribute_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -528,7 +528,7 @@ async fn filter_reserved_geo_attribute_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -554,8 +554,8 @@ async fn filter_reserved_geo_attribute_array() {
#[actix_rt::test]
async fn filter_reserved_geo_attribute_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -568,7 +568,7 @@ async fn filter_reserved_geo_attribute_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -594,8 +594,8 @@ async fn filter_reserved_geo_attribute_string() {
#[actix_rt::test]
async fn filter_reserved_attribute_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -608,7 +608,7 @@ async fn filter_reserved_attribute_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -634,8 +634,8 @@ async fn filter_reserved_attribute_array() {
#[actix_rt::test]
async fn filter_reserved_attribute_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -648,7 +648,7 @@ async fn filter_reserved_attribute_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -674,8 +674,8 @@ async fn filter_reserved_attribute_string() {
#[actix_rt::test]
async fn filter_reserved_geo_point_array() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -688,7 +688,7 @@ async fn filter_reserved_geo_point_array() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -714,8 +714,8 @@ async fn filter_reserved_geo_point_array() {
#[actix_rt::test]
async fn filter_reserved_geo_point_string() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -728,7 +728,7 @@ async fn filter_reserved_geo_point_string() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;
@ -754,8 +754,8 @@ async fn filter_reserved_geo_point_string() {
#[actix_rt::test]
async fn similar_bad_retrieve_vectors() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) =
index.similar_post(json!({"retrieveVectors": "doggo", "embedder": "manual"})).await;
@ -806,8 +806,8 @@ async fn similar_bad_retrieve_vectors() {
#[actix_rt::test]
async fn similar_bad_embedder() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -820,7 +820,7 @@ async fn similar_bad_embedder() {
"filterableAttributes": ["title"]}))
.await;
snapshot!(code, @"202 Accepted");
server.wait_task(response.uid()).await;
server.wait_task(response.uid()).await.succeeded();
let documents = DOCUMENTS.clone();
let (value, code) = index.add_documents(documents, None).await;


@ -1,8 +1,7 @@
mod errors;
mod webhook;
use meili_snap::insta::assert_json_snapshot;
use meili_snap::snapshot;
use meili_snap::{json_string, snapshot};
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
@ -11,14 +10,12 @@ use crate::json;
#[actix_rt::test]
async fn error_get_unexisting_task_status() {
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let (response, code) = index.get_task(1).await;
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index.get_task(u32::MAX as u64).await;
let expected_response = json!({
"message": "Task `1` not found.",
"message": "Task `4294967295` not found.",
"code": "task_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#task_not_found"
@ -30,8 +27,8 @@ async fn error_get_unexisting_task_status() {
#[actix_rt::test]
async fn get_task_status() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (create_task, _status_code) = index.create(None).await;
let (add_task, _status_code) = index
.add_documents(
@ -50,6 +47,7 @@ async fn get_task_status() {
#[actix_rt::test]
async fn list_tasks() {
// Do not use a shared server because we want to assert stuff against the global list of tasks
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
@ -64,6 +62,7 @@ async fn list_tasks() {
#[actix_rt::test]
async fn list_tasks_pagination_and_reverse() {
// do not use a shared server here, as we want to assert task ids and we need them to be stable
let server = Server::new().await;
// First of all we want to create a lot of tasks very quickly. The fastest way is to create a lot of tiny indexes in a loop
let mut last_task = None;
@ -71,7 +70,7 @@ async fn list_tasks_pagination_and_reverse() {
let index = server.index(format!("test-{i}"));
last_task = Some(index.create(None).await.0.uid());
}
server.wait_task(last_task.unwrap()).await;
server.wait_task(last_task.unwrap()).await.succeeded();
let (response, code) = server.tasks_filter("limit=3").await;
assert_eq!(code, 200);
@ -103,13 +102,14 @@ async fn list_tasks_pagination_and_reverse() {
#[actix_rt::test]
async fn list_tasks_with_star_filters() {
let server = Server::new().await;
// Do not use a unique index here, as we want to test the `indexUids=*` filter.
let index = server.index("test");
let (task, _code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
index
.add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
.await;
let (response, code) = index.service.get("/tasks?indexUids=test").await;
let (response, code) = index.service.get(format!("/tasks?indexUids={}", index.uid)).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 2);
@ -127,26 +127,30 @@ async fn list_tasks_with_star_filters() {
let (response, code) =
index.service.get("/tasks?types=*,documentAdditionOrUpdate&statuses=*").await;
assert_eq!(code, 200, "{:?}", response);
assert_eq!(code, 200, "{response:?}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
let (response, code) = index
.service
.get("/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test")
.get(format!(
"/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids={}",
index.uid
))
.await;
assert_eq!(code, 200, "{:?}", response);
assert_eq!(code, 200, "{response:?}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
let (response, code) = index
.service
.get("/tasks?types=*,documentAdditionOrUpdate&statuses=*,failed&indexUids=test,*")
.await;
assert_eq!(code, 200, "{:?}", response);
assert_eq!(code, 200, "{response:?}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
async fn list_tasks_status_filtered() {
// Do not use a shared server because we want to assert stuff against the global list of tasks
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
@ -155,20 +159,21 @@ async fn list_tasks_status_filtered() {
index.wait_task(task.uid()).await.failed();
let (response, code) = index.filtered_tasks(&[], &["succeeded"], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 1);
let (response, code) = index.filtered_tasks(&[], &["succeeded"], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 1);
let (response, code) = index.filtered_tasks(&[], &["succeeded", "failed"], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
async fn list_tasks_type_filtered() {
// Do not use a shared server because we want to assert stuff against the global list of tasks
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
@ -178,32 +183,34 @@ async fn list_tasks_type_filtered() {
.await;
let (response, code) = index.filtered_tasks(&["indexCreation"], &[], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 1);
let (response, code) =
index.filtered_tasks(&["indexCreation", "documentAdditionOrUpdate"], &[], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
}
#[actix_rt::test]
async fn list_tasks_invalid_canceled_by_filter() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
index
let (task, _code) = index
.add_documents(serde_json::from_str(include_str!("../assets/test_set.json")).unwrap(), None)
.await;
let (response, code) = index.filtered_tasks(&[], &[], &["0"]).await;
assert_eq!(code, 200, "{}", response);
let (response, code) =
index.filtered_tasks(&[], &[], &[format!("{}", task.uid()).as_str()]).await;
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 0);
}
#[actix_rt::test]
async fn list_tasks_status_and_type_filtered() {
// Do not use a shared server because we want to assert stuff against the global list of tasks
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.create(None).await;
@ -213,7 +220,7 @@ async fn list_tasks_status_and_type_filtered() {
.await;
let (response, code) = index.filtered_tasks(&["indexCreation"], &["failed"], &[]).await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 0);
let (response, code) = index
@ -223,12 +230,12 @@ async fn list_tasks_status_and_type_filtered() {
&[],
)
.await;
assert_eq!(code, 200, "{}", response);
assert_eq!(code, 200, "{response}");
assert_eq!(response["results"].as_array().unwrap().len(), 2);
}
macro_rules! assert_valid_summarized_task {
($response:expr, $task_type:literal, $index:literal) => {{
($response:expr, $task_type:literal, $index:tt) => {{
assert_eq!($response.as_object().unwrap().len(), 5);
assert!($response["taskUid"].as_u64().is_some());
assert_eq!($response["indexUid"], $index);
@ -242,49 +249,49 @@ macro_rules! assert_valid_summarized_task {
#[actix_web::test]
async fn test_summarized_task_view() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let index_uid = index.uid.clone();
let (response, _) = index.create(None).await;
assert_valid_summarized_task!(response, "indexCreation", "test");
assert_valid_summarized_task!(response, "indexCreation", index_uid);
let (response, _) = index.update(None).await;
assert_valid_summarized_task!(response, "indexUpdate", "test");
assert_valid_summarized_task!(response, "indexUpdate", index_uid);
let (response, _) = index.update_settings(json!({})).await;
assert_valid_summarized_task!(response, "settingsUpdate", "test");
assert_valid_summarized_task!(response, "settingsUpdate", index_uid);
let (response, _) = index.update_documents(json!([{"id": 1}]), None).await;
assert_valid_summarized_task!(response, "documentAdditionOrUpdate", "test");
assert_valid_summarized_task!(response, "documentAdditionOrUpdate", index_uid);
let (response, _) = index.add_documents(json!([{"id": 1}]), None).await;
assert_valid_summarized_task!(response, "documentAdditionOrUpdate", "test");
assert_valid_summarized_task!(response, "documentAdditionOrUpdate", index_uid);
let (response, _) = index.delete_document(1).await;
assert_valid_summarized_task!(response, "documentDeletion", "test");
assert_valid_summarized_task!(response, "documentDeletion", index_uid);
let (response, _) = index.clear_all_documents().await;
assert_valid_summarized_task!(response, "documentDeletion", "test");
assert_valid_summarized_task!(response, "documentDeletion", index_uid);
let (response, _) = index.delete().await;
assert_valid_summarized_task!(response, "indexDeletion", "test");
assert_valid_summarized_task!(response, "indexDeletion", index_uid);
}
#[actix_web::test]
async fn test_summarized_document_addition_or_update() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) =
index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(0).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
let (task, _) = index.get_task(task.uid()).await;
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -303,14 +310,13 @@ async fn test_summarized_document_addition_or_update() {
let (task, _status_code) =
index.add_documents(json!({ "id": 42, "content": "doggos & fluff" }), Some("id")).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(1).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
let (task, _) = index.get_task(task.uid()).await;
snapshot!(task,
@r###"
{
"uid": 1,
"batchUid": 1,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -329,18 +335,22 @@ async fn test_summarized_document_addition_or_update() {
#[actix_web::test]
async fn test_summarized_delete_documents_by_batch() {
let server = Server::new().await;
let index = server.index("test");
let (task, _status_code) = index.delete_batch(vec![1, 2, 3]).await;
let server = Server::new_shared();
let index = server.unique_index();
let non_existing_task_id1 = u32::MAX as u64;
let non_existing_task_id2 = non_existing_task_id1 - 1;
let non_existing_task_id3 = non_existing_task_id1 - 2;
let (task, _status_code) = index
.delete_batch(vec![non_existing_task_id1, non_existing_task_id2, non_existing_task_id3])
.await;
index.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(0).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
let (task, _) = index.get_task(task.uid()).await;
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
@ -350,7 +360,7 @@ async fn test_summarized_delete_documents_by_batch() {
"originalFilter": null
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -366,13 +376,12 @@ async fn test_summarized_delete_documents_by_batch() {
let (del_task, _status_code) = index.delete_batch(vec![42]).await;
index.wait_task(del_task.uid()).await.succeeded();
let (task, _) = index.get_task(del_task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@ -392,20 +401,19 @@ async fn test_summarized_delete_documents_by_batch() {
#[actix_web::test]
async fn test_summarized_delete_documents_by_filter() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) =
index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
index.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
@ -415,7 +423,7 @@ async fn test_summarized_delete_documents_by_filter() {
"originalFilter": "\"doggo = bernese\""
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -432,13 +440,12 @@ async fn test_summarized_delete_documents_by_filter() {
index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
index.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
@ -448,7 +455,7 @@ async fn test_summarized_delete_documents_by_filter() {
"originalFilter": "\"doggo = bernese\""
},
"error": {
"message": "Index `test`: Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
"message": "Index `[uuid]`: Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
"code": "invalid_document_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
@ -465,13 +472,12 @@ async fn test_summarized_delete_documents_by_filter() {
index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 4,
"batchUid": 4,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@ -491,18 +497,17 @@ async fn test_summarized_delete_documents_by_filter() {
#[actix_web::test]
async fn test_summarized_delete_document_by_id() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.delete_document(1).await;
index.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "documentDeletion",
"canceledBy": null,
@ -512,7 +517,7 @@ async fn test_summarized_delete_document_by_id() {
"originalFilter": null
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -528,13 +533,12 @@ async fn test_summarized_delete_document_by_id() {
let (task, _status_code) = index.delete_document(42).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 2,
"batchUid": 2,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentDeletion",
"canceledBy": null,
@ -554,12 +558,12 @@ async fn test_summarized_delete_document_by_id() {
#[actix_web::test]
async fn test_summarized_settings_update() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
// here we should find my payload even in the failed task.
let (response, code) = index.update_settings(json!({ "rankingRules": ["custom"] })).await;
meili_snap::snapshot!(code, @"400 Bad Request");
meili_snap::snapshot!(meili_snap::json_string!(response), @r###"
snapshot!(code, @"400 Bad Request");
snapshot!(json_string!(response), @r###"
{
"message": "Invalid value at `.rankingRules[0]`: `custom` ranking rule is invalid. Valid ranking rules are words, typo, sort, proximity, attribute, exactness and custom ranking rules.",
"code": "invalid_settings_ranking_rules",
@ -571,13 +575,12 @@ async fn test_summarized_settings_update() {
let (task,_status_code) = index.update_settings(json!({ "displayedAttributes": ["doggos", "name"], "filterableAttributes": ["age", "nb_paw_pads"], "sortableAttributes": ["iq"] })).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "settingsUpdate",
"canceledBy": null,
@ -605,18 +608,17 @@ async fn test_summarized_settings_update() {
#[actix_web::test]
async fn test_summarized_index_creation() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "indexCreation",
"canceledBy": null,
@ -634,13 +636,12 @@ async fn test_summarized_index_creation() {
let (task, _status_code) = index.create(Some("doggos")).await;
index.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 1,
"batchUid": 1,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexCreation",
"canceledBy": null,
@ -648,7 +649,7 @@ async fn test_summarized_index_creation() {
"primaryKey": "doggos"
},
"error": {
"message": "Index `test` already exists.",
"message": "Index `[uuid]` already exists.",
"code": "index_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_already_exists"
@ -663,8 +664,8 @@ async fn test_summarized_index_creation() {
#[actix_web::test]
async fn test_summarized_index_deletion() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
let (ret, _code) = index.delete().await;
let task = index.wait_task(ret.uid()).await;
snapshot!(task,
@ -672,7 +673,7 @@ async fn test_summarized_index_deletion() {
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexDeletion",
"canceledBy": null,
@ -680,7 +681,7 @@ async fn test_summarized_index_deletion() {
"deletedDocuments": 0
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -703,7 +704,7 @@ async fn test_summarized_index_deletion() {
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "documentAdditionOrUpdate",
"canceledBy": null,
@ -726,7 +727,7 @@ async fn test_summarized_index_deletion() {
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "indexDeletion",
"canceledBy": null,
@ -749,7 +750,7 @@ async fn test_summarized_index_deletion() {
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "test",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexDeletion",
"canceledBy": null,
@ -757,7 +758,7 @@ async fn test_summarized_index_deletion() {
"deletedDocuments": 0
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -772,19 +773,18 @@ async fn test_summarized_index_deletion() {
#[actix_web::test]
async fn test_summarized_index_update() {
let server = Server::new().await;
let index = server.index("test");
let server = Server::new_shared();
let index = server.unique_index();
// If the index doesn't exist yet, we should get errors with or without the primary key.
let (task, _status_code) = index.update(None).await;
index.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexUpdate",
"canceledBy": null,
@ -792,7 +792,7 @@ async fn test_summarized_index_update() {
"primaryKey": null
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -807,13 +807,12 @@ async fn test_summarized_index_update() {
let (task, _status_code) = index.update(Some("bones")).await;
index.wait_task(task.uid()).await.failed();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 1,
"batchUid": 1,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "failed",
"type": "indexUpdate",
"canceledBy": null,
@ -821,7 +820,7 @@ async fn test_summarized_index_update() {
"primaryKey": "bones"
},
"error": {
"message": "Index `test` not found.",
"message": "Index `[uuid]` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
@ -839,13 +838,12 @@ async fn test_summarized_index_update() {
let (task, _status_code) = index.update(None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 3,
"batchUid": 3,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "indexUpdate",
"canceledBy": null,
@ -863,13 +861,12 @@ async fn test_summarized_index_update() {
let (task, _status_code) = index.update(Some("bones")).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 4,
"batchUid": 4,
"indexUid": "test",
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "[uuid]",
"status": "succeeded",
"type": "indexUpdate",
"canceledBy": null,
@ -887,7 +884,7 @@ async fn test_summarized_index_update() {
#[actix_web::test]
async fn test_summarized_index_swap() {
let server = Server::new().await;
let server = Server::new_shared();
let (task, _status_code) = server
.index_swap(json!([
{ "indexes": ["doggos", "cattos"] }
@ -895,12 +892,11 @@ async fn test_summarized_index_swap() {
.await;
server.wait_task(task.uid()).await.failed();
let (task, _) = server.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "failed",
"type": "indexSwap",
@ -928,23 +924,25 @@ async fn test_summarized_index_swap() {
}
"###);
let (task, _code) = server.index("doggos").create(None).await;
let doggos_index = server.unique_index();
let (task, _code) = doggos_index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = server.index("cattos").create(None).await;
let cattos_index = server.unique_index();
let (task, _code) = cattos_index.create(None).await;
server.wait_task(task.uid()).await.succeeded();
let (task, _code) = server
.index_swap(json!([
{ "indexes": ["doggos", "cattos"] }
{ "indexes": [doggos_index.uid, cattos_index.uid] }
]))
.await;
server.wait_task(task.uid()).await.succeeded();
let (task, _) = server.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(json_string!(task,
{ ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".**.indexes[0]" => "doggos", ".**.indexes[1]" => "cattos", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r###"
{
"uid": 3,
"batchUid": 3,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "succeeded",
"type": "indexSwap",
@ -970,20 +968,21 @@ async fn test_summarized_index_swap() {
#[actix_web::test]
async fn test_summarized_task_cancelation() {
let server = Server::new().await;
let index = server.index("doggos");
let server = Server::new_shared();
let index = server.unique_index();
// to avoid being flaky we're only going to cancel an already finished task :(
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = server.cancel_tasks("uids=0").await;
let task_uid = task.uid();
index.wait_task(task_uid).await.succeeded();
let (task, _status_code) = server.cancel_tasks(format!("uids={task_uid}").as_str()).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(json_string!(task,
{ ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".**.originalFilter" => "[of]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
@r###"
{
"uid": 1,
"batchUid": 1,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "succeeded",
"type": "taskCancelation",
@ -991,7 +990,7 @@ async fn test_summarized_task_cancelation() {
"details": {
"matchedTasks": 1,
"canceledTasks": 0,
"originalFilter": "?uids=0"
"originalFilter": "[of]"
},
"error": null,
"duration": "[duration]",
@ -1004,20 +1003,19 @@ async fn test_summarized_task_cancelation() {
#[actix_web::test]
async fn test_summarized_task_deletion() {
let server = Server::new().await;
let index = server.index("doggos");
let server = Server::new_shared();
let index = server.unique_index();
// to avoid being flaky we're only going to delete an already finished task :(
let (task, _status_code) = index.create(None).await;
index.wait_task(task.uid()).await.succeeded();
let (task, _status_code) = server.delete_tasks("uids=0").await;
index.wait_task(task.uid()).await.succeeded();
let (task, _) = index.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 1,
"batchUid": 1,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "succeeded",
"type": "taskDeletion",
@ -1038,22 +1036,21 @@ async fn test_summarized_task_deletion() {
#[actix_web::test]
async fn test_summarized_dump_creation() {
let server = Server::new().await;
let server = Server::new_shared();
let (task, _status_code) = server.create_dump().await;
server.wait_task(task.uid()).await;
let (task, _) = server.get_task(task.uid()).await;
assert_json_snapshot!(task,
{ ".details.dumpUid" => "[dumpUid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
snapshot!(task,
@r###"
{
"uid": 0,
"batchUid": 0,
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": null,
"status": "succeeded",
"type": "dumpCreation",
"canceledBy": null,
"details": {
"dumpUid": "[dumpUid]"
"dumpUid": "[dump_uid]"
},
"error": null,
"duration": "[duration]",

View File

@ -6,8 +6,8 @@ use crate::vector::generate_default_user_provided_documents;
#[actix_rt::test]
async fn retrieve_binary_quantize_status_in_the_settings() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -65,8 +65,8 @@ async fn retrieve_binary_quantize_status_in_the_settings() {
#[actix_rt::test]
async fn binary_quantize_before_sending_documents() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -139,8 +139,8 @@ async fn binary_quantize_before_sending_documents() {
#[actix_rt::test]
async fn binary_quantize_after_sending_documents() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -226,8 +226,8 @@ async fn binary_quantize_after_sending_documents() {
#[actix_rt::test]
async fn try_to_disable_binary_quantization() {
let server = Server::new().await;
let index = server.index("doggo");
let server = Server::new_shared();
let index = server.unique_index();
let (response, code) = index
.update_settings(json!({
@ -256,11 +256,11 @@ async fn try_to_disable_binary_quantization() {
.await;
snapshot!(code, @"202 Accepted");
let ret = server.wait_task(response.uid()).await;
snapshot!(ret, @r#"
snapshot!(json_string!(ret, { ".uid" => "[uid]", ".batchUid" => "[batch_uid]", ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".finishedAt" => "[date]", ".startedAt" => "[date]" }), @r#"
{
"uid": "[uid]",
"batchUid": "[batch_uid]",
"indexUid": "doggo",
"indexUid": "[uuid]",
"status": "failed",
"type": "settingsUpdate",
"canceledBy": null,
@ -274,7 +274,7 @@ async fn try_to_disable_binary_quantization() {
}
},
"error": {
"message": "Index `doggo`: `.embedders.manual.binaryQuantized`: Cannot disable the binary quantization.\n - Note: Binary quantization is a lossy operation that cannot be reverted.\n - Hint: Add a new embedder that is non-quantized and regenerate the vectors.",
"message": "Index `[uuid]`: `.embedders.manual.binaryQuantized`: Cannot disable the binary quantization.\n - Note: Binary quantization is a lossy operation that cannot be reverted.\n - Hint: Add a new embedder that is non-quantized and regenerate the vectors.",
"code": "invalid_settings_embedders",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_settings_embedders"

View File

@ -1,11 +1,13 @@
use std::cmp::Ordering;
use heed::RoTxn;
use itertools::Itertools;
use roaring::RoaringBitmap;
use crate::score_details::{ScoreDetails, ScoreValue, ScoringStrategy};
use crate::search::new::{distinct_fid, distinct_single_docid};
use crate::search::SemanticSearch;
use crate::{MatchingWords, Result, Search, SearchResult};
use crate::{Index, MatchingWords, Result, Search, SearchResult};
struct ScoreWithRatioResult {
matching_words: MatchingWords,
@ -91,7 +93,10 @@ impl ScoreWithRatioResult {
keyword_results: Self,
from: usize,
length: usize,
) -> (SearchResult, u32) {
distinct: Option<&str>,
index: &Index,
rtxn: &RoTxn<'_>,
) -> Result<(SearchResult, u32)> {
#[derive(Clone, Copy)]
enum ResultSource {
Semantic,
@ -106,8 +111,9 @@ impl ScoreWithRatioResult {
vector_results.document_scores.len() + keyword_results.document_scores.len(),
);
let mut documents_seen = RoaringBitmap::new();
for ((docid, (main_score, _sub_score)), source) in vector_results
let distinct_fid = distinct_fid(distinct, index, rtxn)?;
let mut excluded_documents = RoaringBitmap::new();
for res in vector_results
.document_scores
.into_iter()
.zip(std::iter::repeat(ResultSource::Semantic))
@ -121,13 +127,33 @@ impl ScoreWithRatioResult {
compare_scores(left, right).is_ge()
},
)
// remove documents we already saw
.filter(|((docid, _), _)| documents_seen.insert(*docid))
// remove documents we already saw and apply distinct rule
.filter_map(|item @ ((docid, _), _)| {
if !excluded_documents.insert(docid) {
// the document was already added, or is indistinct from an already-added document.
return None;
}
if let Some(distinct_fid) = distinct_fid {
if let Err(error) = distinct_single_docid(
index,
rtxn,
distinct_fid,
docid,
&mut excluded_documents,
) {
return Some(Err(error));
}
}
Some(Ok(item))
})
// start skipping **after** the filter
.skip(from)
// take **after** skipping
.take(length)
{
let ((docid, (main_score, _sub_score)), source) = res?;
if let ResultSource::Semantic = source {
semantic_hit_count += 1;
}
@ -136,10 +162,24 @@ impl ScoreWithRatioResult {
document_scores.push(main_score);
}
(
// compute the set of candidates from both sets
let candidates = vector_results.candidates | keyword_results.candidates;
let must_remove_redundant_candidates = distinct_fid.is_some();
let candidates = if must_remove_redundant_candidates {
// patch-up the candidates to remove the indistinct documents, then add back the actual hits
let mut candidates = candidates - excluded_documents;
for docid in &documents_ids {
candidates.insert(*docid);
}
candidates
} else {
candidates
};
Ok((
SearchResult {
matching_words: keyword_results.matching_words,
candidates: vector_results.candidates | keyword_results.candidates,
candidates,
documents_ids,
document_scores,
degraded: vector_results.degraded | keyword_results.degraded,
@ -147,7 +187,7 @@ impl ScoreWithRatioResult {
| keyword_results.used_negative_operator,
},
semantic_hit_count,
)
))
}
}
@ -226,8 +266,15 @@ impl Search<'_> {
let keyword_results = ScoreWithRatioResult::new(keyword_results, 1.0 - semantic_ratio);
let vector_results = ScoreWithRatioResult::new(vector_results, semantic_ratio);
let (merge_results, semantic_hit_count) =
ScoreWithRatioResult::merge(vector_results, keyword_results, self.offset, self.limit);
let (merge_results, semantic_hit_count) = ScoreWithRatioResult::merge(
vector_results,
keyword_results,
self.offset,
self.limit,
search.distinct.as_deref(),
search.index,
search.rtxn,
)?;
assert!(merge_results.documents_ids.len() <= self.limit);
Ok((merge_results, Some(semantic_hit_count)))
}
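
The behavioural change here: deduplication and the distinct rule are applied while walking the merged, score-ordered stream, and only then are `from`/`length` applied, so pagination counts post-distinct hits rather than raw ones. A simplified stand-in for the loop above (plain std types instead of `RoaringBitmap`, and no error propagation through `filter_map`):

    use std::collections::HashSet;

    /// Keep the first occurrence of each docid, drop documents indistinct
    /// from an already-kept one, then paginate over what survives.
    fn keep_distinct(
        ranked: impl Iterator<Item = u32>,        // docids, best score first
        indistinct_of: impl Fn(u32) -> Vec<u32>,  // stand-in for distinct_single_docid
        from: usize,
        length: usize,
    ) -> Vec<u32> {
        let mut excluded = HashSet::new();
        ranked
            .filter(|docid| {
                if !excluded.insert(*docid) {
                    return false; // already added, or indistinct from an added doc
                }
                excluded.extend(indistinct_of(*docid));
                true
            })
            .skip(from)   // start skipping **after** the filter
            .take(length) // take **after** skipping
            .collect()
    }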

View File

@ -4,7 +4,9 @@ use super::logger::SearchLogger;
use super::ranking_rules::{BoxRankingRule, RankingRuleQueryTrait};
use super::SearchContext;
use crate::score_details::{ScoreDetails, ScoringStrategy};
use crate::search::new::distinct::{apply_distinct_rule, distinct_single_docid, DistinctOutput};
use crate::search::new::distinct::{
apply_distinct_rule, distinct_fid, distinct_single_docid, DistinctOutput,
};
use crate::{Result, TimeBudget};
pub struct BucketSortOutput {
@ -35,16 +37,7 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
logger.ranking_rules(&ranking_rules);
logger.initial_universe(universe);
let distinct_field = match distinct {
Some(distinct) => Some(distinct),
None => ctx.index.distinct_field(ctx.txn)?,
};
let distinct_fid = if let Some(field) = distinct_field {
ctx.index.fields_ids_map(ctx.txn)?.id(field)
} else {
None
};
let distinct_fid = distinct_fid(distinct, ctx.index, ctx.txn)?;
if universe.len() < from as u64 {
return Ok(BucketSortOutput {

View File

@ -9,7 +9,7 @@ use crate::heed_codec::facet::{
FacetGroupKey, FacetGroupKeyCodec, FacetGroupValueCodec, FieldDocIdFacetCodec,
};
use crate::heed_codec::BytesRefCodec;
use crate::{Index, Result, SearchContext};
use crate::{FieldId, Index, Result, SearchContext};
pub struct DistinctOutput {
pub remaining: RoaringBitmap,
@ -121,3 +121,18 @@ pub fn facet_string_values<'a>(
fn facet_values_prefix_key(distinct: u16, id: u32) -> [u8; FID_SIZE + DOCID_SIZE] {
concat_arrays::concat_arrays!(distinct.to_be_bytes(), id.to_be_bytes())
}
pub fn distinct_fid(
query_distinct_field: Option<&str>,
index: &Index,
rtxn: &RoTxn<'_>,
) -> Result<Option<FieldId>> {
let distinct_field = match query_distinct_field {
Some(distinct) => Some(distinct),
None => index.distinct_field(rtxn)?,
};
let distinct_fid =
if let Some(field) = distinct_field { index.fields_ids_map(rtxn)?.id(field) } else { None };
Ok(distinct_fid)
}
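
This helper centralises the lookup both callers previously duplicated: an explicit per-query `distinct` wins, otherwise the index-level setting applies, and a field name absent from `fields_ids_map` resolves to `None` (distinct silently disabled) rather than an error. The two call sites are visible in the diffs above:

    let distinct_fid = distinct_fid(distinct, ctx.index, ctx.txn)?; // bucket_sort
    let distinct_fid = distinct_fid(distinct, index, rtxn)?;       // hybrid merge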

View File

@ -28,6 +28,7 @@ use std::time::Duration;
use bucket_sort::{bucket_sort, BucketSortOutput};
use charabia::{Language, TokenizerBuilder};
use db_cache::DatabaseCache;
pub use distinct::{distinct_fid, distinct_single_docid};
use exact_attribute::ExactAttribute;
use graph_based_ranking_rule::{Exactness, Fid, Position, Proximity, Typo};
use heed::RoTxn;
@ -47,8 +48,7 @@ use sort::Sort;
use self::distinct::facet_string_values;
use self::geo_sort::GeoSort;
pub use self::geo_sort::Parameter as GeoSortParameter;
pub use self::geo_sort::Strategy as GeoSortStrategy;
pub use self::geo_sort::{Parameter as GeoSortParameter, Strategy as GeoSortStrategy};
use self::graph_based_ranking_rule::Words;
use self::interner::Interned;
use self::vector_sort::VectorSort;
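
The new `pub use distinct::{distinct_fid, distinct_single_docid};` is what lets the hybrid merge above import both helpers from the module root:

    use crate::search::new::{distinct_fid, distinct_single_docid};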

View File

@ -72,7 +72,7 @@ fn test_2gram_simple() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@ -103,7 +103,7 @@ fn test_3gram_simple() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@ -153,7 +153,7 @@ fn test_no_disable_ngrams() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@ -179,7 +179,7 @@ fn test_2gram_prefix() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@ -208,7 +208,7 @@ fn test_3gram_prefix() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();
@ -260,7 +260,7 @@ fn test_disable_split_words() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();

View File

@ -151,7 +151,7 @@ fn test_no_typo() {
let index = create_index();
index
.update_settings(|s| {
s.set_autorize_typos(false);
s.set_authorize_typos(false);
})
.unwrap();

View File

@ -19,10 +19,7 @@ use crate::update::{
};
use crate::vector::settings::{EmbedderSource, EmbeddingSettings};
use crate::vector::EmbeddingConfigs;
use crate::{
db_snap, obkv_to_json, Filter, FilterableAttributesRule, Index, Search, SearchResult,
ThreadPoolNoAbortBuilder,
};
use crate::{db_snap, obkv_to_json, Filter, FilterableAttributesRule, Index, Search, SearchResult};
pub(crate) struct TempIndex {
pub inner: Index,
@ -62,15 +59,8 @@ impl TempIndex {
wtxn: &mut RwTxn<'t>,
documents: Mmap,
) -> Result<(), crate::error::Error> {
let local_pool;
let indexer_config = &self.indexer_config;
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new().build().unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
let rtxn = self.inner.read_txn()?;
let db_fields_ids_map = self.inner.fields_ids_map(&rtxn)?;
@ -153,15 +143,8 @@ impl TempIndex {
wtxn: &mut RwTxn<'t>,
external_document_ids: Vec<String>,
) -> Result<(), crate::error::Error> {
let local_pool;
let indexer_config = &self.indexer_config;
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new().build().unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
let rtxn = self.inner.read_txn()?;
let db_fields_ids_map = self.inner.fields_ids_map(&rtxn)?;
@ -231,15 +214,8 @@ fn aborting_indexation() {
let mut wtxn = index.inner.write_txn().unwrap();
let should_abort = AtomicBool::new(false);
let local_pool;
let indexer_config = &index.indexer_config;
let pool = match &indexer_config.thread_pool {
Some(pool) => pool,
None => {
local_pool = ThreadPoolNoAbortBuilder::new().build().unwrap();
&local_pool
}
};
let pool = &indexer_config.thread_pool;
let rtxn = index.inner.read_txn().unwrap();
let db_fields_ids_map = index.inner.fields_ids_map(&rtxn).unwrap();

View File

@ -54,6 +54,10 @@ impl ThreadPoolNoAbortBuilder {
ThreadPoolNoAbortBuilder::default()
}
pub fn new_for_indexing() -> ThreadPoolNoAbortBuilder {
ThreadPoolNoAbortBuilder::default().thread_name(|index| format!("indexing-thread:{index}"))
}
pub fn thread_name<F>(mut self, closure: F) -> Self
where
F: FnMut(usize) -> String + 'static,

View File

@ -33,7 +33,6 @@ use crate::documents::{obkv_to_object, DocumentsBatchReader};
use crate::error::{Error, InternalError};
use crate::index::{PrefixSearch, PrefixSettings};
use crate::progress::Progress;
use crate::thread_pool_no_abort::ThreadPoolNoAbortBuilder;
pub use crate::update::index_documents::helpers::CursorClonableMmap;
use crate::update::{
IndexerConfig, UpdateIndexingStep, WordPrefixDocids, WordPrefixIntegerDocids, WordsPrefixesFst,
@ -228,24 +227,7 @@ where
let possible_embedding_mistakes =
crate::vector::error::PossibleEmbeddingMistakes::new(&field_distribution);
let backup_pool;
let pool = match self.indexer_config.thread_pool {
Some(ref pool) => pool,
None => {
// We initialize a backup pool with the default
// settings if none have already been set.
#[allow(unused_mut)]
let mut pool_builder = ThreadPoolNoAbortBuilder::new();
#[cfg(test)]
{
pool_builder = pool_builder.num_threads(1);
}
backup_pool = pool_builder.build()?;
&backup_pool
}
};
let pool = &self.indexer_config.thread_pool;
// create LMDB writer channel
let (lmdb_writer_sx, lmdb_writer_rx): (

View File

@ -1,7 +1,7 @@
use grenad::CompressionType;
use super::GrenadParameters;
use crate::thread_pool_no_abort::ThreadPoolNoAbort;
use crate::{thread_pool_no_abort::ThreadPoolNoAbort, ThreadPoolNoAbortBuilder};
#[derive(Debug)]
pub struct IndexerConfig {
@ -9,9 +9,10 @@ pub struct IndexerConfig {
pub max_nb_chunks: Option<usize>,
pub documents_chunk_size: Option<usize>,
pub max_memory: Option<usize>,
pub max_threads: Option<usize>,
pub chunk_compression_type: CompressionType,
pub chunk_compression_level: Option<u32>,
pub thread_pool: Option<ThreadPoolNoAbort>,
pub thread_pool: ThreadPoolNoAbort,
pub max_positions_per_attributes: Option<u32>,
pub skip_index_budget: bool,
}
@ -27,16 +28,39 @@ impl IndexerConfig {
}
}
/// By default, use only one thread for indexing in tests.
#[cfg(test)]
pub fn default_thread_pool_and_threads() -> (ThreadPoolNoAbort, Option<usize>) {
let pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.num_threads(1)
.build()
.expect("failed to build default rayon thread pool");
(pool, Some(1))
}
#[cfg(not(test))]
pub fn default_thread_pool_and_threads() -> (ThreadPoolNoAbort, Option<usize>) {
let pool = ThreadPoolNoAbortBuilder::new_for_indexing()
.build()
.expect("failed to build default rayon thread pool");
(pool, None)
}
impl Default for IndexerConfig {
fn default() -> Self {
let (thread_pool, max_threads) = default_thread_pool_and_threads();
Self {
max_threads,
thread_pool,
log_every_n: None,
max_nb_chunks: None,
documents_chunk_size: None,
max_memory: None,
chunk_compression_type: CompressionType::None,
chunk_compression_level: None,
thread_pool: None,
max_positions_per_attributes: None,
skip_index_budget: false,
}
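
With `thread_pool` no longer an `Option`, every former fallback path (the `local_pool`/`backup_pool` blocks removed above) disappears: a pool always exists, built single-threaded under `cfg(test)` and with rayon's defaults otherwise. A sketch of constructing a config with a custom pool, using the builder shown earlier:

    let pool = ThreadPoolNoAbortBuilder::new_for_indexing()
        .num_threads(4)
        .build()
        .expect("failed to build indexing thread pool");
    let config = IndexerConfig {
        max_threads: Some(4),
        thread_pool: pool,
        ..Default::default()
    };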

View File

@ -4,7 +4,7 @@ pub use self::concurrent_available_ids::ConcurrentAvailableIds;
pub use self::facet::bulk::FacetsUpdateBulk;
pub use self::facet::incremental::FacetsUpdateIncrementalInner;
pub use self::index_documents::*;
pub use self::indexer_config::IndexerConfig;
pub use self::indexer_config::{default_thread_pool_and_threads, IndexerConfig};
pub use self::new::ChannelCongestion;
pub use self::settings::{validate_embedding_settings, Setting, Settings};
pub use self::update_step::UpdateIndexingStep;

View File

@ -8,7 +8,7 @@ use hashbrown::HashMap;
use serde_json::Value;
use super::super::cache::BalancedCaches;
use super::facet_document::extract_document_facets;
use super::facet_document::{extract_document_facets, extract_geo_document};
use super::FacetKind;
use crate::fields_ids_map::metadata::Metadata;
use crate::filterable_attributes_rules::match_faceted_field;
@ -90,17 +90,12 @@ impl FacetedDocidsExtractor {
let mut cached_sorter = context.data.borrow_mut_or_yield();
let mut del_add_facet_value = DelAddFacetValue::new(&context.doc_alloc);
let docid = document_change.docid();
let res = match document_change {
DocumentChange::Deletion(inner) => extract_document_facets(
inner.current(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
is_geo_enabled,
&mut |fid, meta, depth, value| {
// Using a macro avoids borrowing the parameters as mutable in both closures at
// the same time by postponing their creation.
macro_rules! facet_fn {
(del) => {
|fid: FieldId, meta: Metadata, depth: perm_json_p::Depth, value: &Value| {
Self::facet_fn_with_options(
&context.doc_alloc,
cached_sorter.deref_mut(),
@ -114,91 +109,10 @@ impl FacetedDocidsExtractor {
depth,
value,
)
},
),
DocumentChange::Update(inner) => {
let has_changed = inner.has_changed_for_fields(
&mut |field_name| {
match_faceted_field(
field_name,
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
)
},
rtxn,
index,
context.db_fields_ids_map,
)?;
let has_changed_for_geo_fields =
inner.has_changed_for_geo_fields(rtxn, index, context.db_fields_ids_map)?;
if !has_changed && !has_changed_for_geo_fields {
return Ok(());
}
extract_document_facets(
inner.current(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
is_geo_enabled,
&mut |fid, meta, depth, value| {
Self::facet_fn_with_options(
&context.doc_alloc,
cached_sorter.deref_mut(),
BalancedCaches::insert_del_u32,
&mut del_add_facet_value,
DelAddFacetValue::insert_del,
docid,
fid,
meta,
filterable_attributes,
depth,
value,
)
},
)?;
extract_document_facets(
inner.merged(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
is_geo_enabled,
&mut |fid, meta, depth, value| {
Self::facet_fn_with_options(
&context.doc_alloc,
cached_sorter.deref_mut(),
BalancedCaches::insert_add_u32,
&mut del_add_facet_value,
DelAddFacetValue::insert_add,
docid,
fid,
meta,
filterable_attributes,
depth,
value,
)
},
)
}
DocumentChange::Insertion(inner) => extract_document_facets(
inner.inserted(),
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
is_geo_enabled,
&mut |fid, meta, depth, value| {
};
(add) => {
|fid: FieldId, meta: Metadata, depth: perm_json_p::Depth, value: &Value| {
Self::facet_fn_with_options(
&context.doc_alloc,
cached_sorter.deref_mut(),
@ -212,12 +126,116 @@ impl FacetedDocidsExtractor {
depth,
value,
)
},
),
}
};
}
match document_change {
DocumentChange::Deletion(inner) => {
let mut del = facet_fn!(del);
extract_document_facets(
inner.current(rtxn, index, context.db_fields_ids_map)?,
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
&mut del,
)?;
if is_geo_enabled {
extract_geo_document(
inner.current(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
&mut del,
)?;
}
}
DocumentChange::Update(inner) => {
let has_changed_for_facets = inner.has_changed_for_fields(
&mut |field_name| {
match_faceted_field(
field_name,
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
)
},
rtxn,
index,
context.db_fields_ids_map,
)?;
// 1. Maybe update doc
if has_changed_for_facets {
extract_document_facets(
inner.current(rtxn, index, context.db_fields_ids_map)?,
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
&mut facet_fn!(del),
)?;
extract_document_facets(
inner.merged(rtxn, index, context.db_fields_ids_map)?,
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
&mut facet_fn!(add),
)?;
}
// 2. Maybe update geo
if is_geo_enabled
&& inner.has_changed_for_geo_fields(rtxn, index, context.db_fields_ids_map)?
{
extract_geo_document(
inner.current(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
&mut facet_fn!(del),
)?;
extract_geo_document(
inner.merged(rtxn, index, context.db_fields_ids_map)?,
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
&mut facet_fn!(add),
)?;
}
}
DocumentChange::Insertion(inner) => {
let mut add = facet_fn!(add);
extract_document_facets(
inner.inserted(),
new_fields_ids_map.deref_mut(),
filterable_attributes,
sortable_fields,
asc_desc_fields,
distinct_field,
&mut add,
)?;
if is_geo_enabled {
extract_geo_document(
inner.inserted(),
inner.external_document_id(),
new_fields_ids_map.deref_mut(),
&mut add,
)?;
}
}
};
del_add_facet_value.send_data(docid, sender, &context.doc_alloc).unwrap();
res
Ok(())
}
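
The `facet_fn!` macro above is a borrow-checker workaround: both the `del` and `add` closures capture `cached_sorter` and `del_add_facet_value` mutably, so creating them eagerly as two `let` bindings would alias the mutable borrows, while expanding the macro at each use site keeps at most one closure alive at a time. A minimal self-contained sketch of the same trick, with hypothetical names:

    fn demo() {
        let mut sink: Vec<(&'static str, u32)> = Vec::new();
        // Expanding the macro postpones the closure's creation, and with it
        // the mutable borrow of `sink`, to the expansion site.
        macro_rules! record_fn {
            ($tag:expr) => {
                |value: u32| sink.push(($tag, value))
            };
        }
        {
            let mut del = record_fn!("del");
            del(1);
        } // `del` dropped here, releasing the borrow on `sink`
        let mut add = record_fn!("add");
        add(2);
        assert_eq!(sink, [("del", 1), ("add", 2)]);
    }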
#[allow(clippy::too_many_arguments)]

View File

@ -16,13 +16,11 @@ use crate::filterable_attributes_rules::match_faceted_field;
#[allow(clippy::too_many_arguments)]
pub fn extract_document_facets<'doc>(
document: impl Document<'doc>,
external_document_id: &str,
field_id_map: &mut GlobalFieldsIdsMap,
filterable_attributes: &[FilterableAttributesRule],
sortable_fields: &HashSet<String>,
asc_desc_fields: &HashSet<String>,
distinct_field: &Option<String>,
is_geo_enabled: bool,
facet_fn: &mut impl FnMut(FieldId, Metadata, perm_json_p::Depth, &Value) -> Result<()>,
) -> Result<()> {
// return the match result for the given field name.
@ -102,17 +100,24 @@ pub fn extract_document_facets<'doc>(
}
}
if is_geo_enabled {
if let Some(geo_value) = document.geo_field()? {
if let Some([lat, lng]) = extract_geo_coordinates(external_document_id, geo_value)? {
let ((lat_fid, lat_meta), (lng_fid, lng_meta)) = field_id_map
.id_with_metadata_or_insert("_geo.lat")
.zip(field_id_map.id_with_metadata_or_insert("_geo.lng"))
.ok_or(UserError::AttributeLimitReached)?;
Ok(())
}
facet_fn(lat_fid, lat_meta, perm_json_p::Depth::OnBaseKey, &lat.into())?;
facet_fn(lng_fid, lng_meta, perm_json_p::Depth::OnBaseKey, &lng.into())?;
}
pub fn extract_geo_document<'doc>(
document: impl Document<'doc>,
external_document_id: &str,
field_id_map: &mut GlobalFieldsIdsMap,
facet_fn: &mut impl FnMut(FieldId, Metadata, perm_json_p::Depth, &Value) -> Result<()>,
) -> Result<()> {
if let Some(geo_value) = document.geo_field()? {
if let Some([lat, lng]) = extract_geo_coordinates(external_document_id, geo_value)? {
let ((lat_fid, lat_meta), (lng_fid, lng_meta)) = field_id_map
.id_with_metadata_or_insert("_geo.lat")
.zip(field_id_map.id_with_metadata_or_insert("_geo.lng"))
.ok_or(UserError::AttributeLimitReached)?;
facet_fn(lat_fid, lat_meta, perm_json_p::Depth::OnBaseKey, &lat.into())?;
facet_fn(lng_fid, lng_meta, perm_json_p::Depth::OnBaseKey, &lng.into())?;
}
}
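
After this split, `extract_document_facets` is geo-agnostic (it no longer needs `external_document_id` or `is_geo_enabled`), and the extractor composes the two passes explicitly. The resulting call shape, as used in the extractor diff above (sketch):

    extract_document_facets(
        document,
        new_fields_ids_map.deref_mut(),
        filterable_attributes,
        sortable_fields,
        asc_desc_fields,
        distinct_field,
        &mut facet_fn,
    )?;
    if is_geo_enabled {
        extract_geo_document(
            document,
            external_document_id,
            new_fields_ids_map.deref_mut(),
            &mut facet_fn,
        )?;
    }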

View File

@ -333,7 +333,7 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
self.primary_key = Setting::Set(primary_key);
}
pub fn set_autorize_typos(&mut self, val: bool) {
pub fn set_authorize_typos(&mut self, val: bool) {
self.authorize_typos = Setting::Set(val);
}

View File

@ -792,7 +792,7 @@ fn test_disable_typo() {
index
.update_settings_using_wtxn(&mut txn, |settings| {
settings.set_autorize_typos(false);
settings.set_authorize_typos(false);
})
.unwrap();